From 203d59206fe147edca749056c28c1b7dcbdffc27 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 15 Oct 2021 14:42:16 +0200 Subject: [PATCH] Bulk update AWSRetry.backoff to AWSRetry.jittered_backoff --- .../modules/aws_config_delivery_channel.py | 2 +- .../aws_direct_connect_confirm_connection.py | 4 +- .../modules/aws_direct_connect_connection.py | 10 ++--- ...s_direct_connect_link_aggregation_group.py | 2 +- .../aws_direct_connect_virtual_interface.py | 2 +- plugins/modules/aws_inspector_target.py | 2 +- plugins/modules/aws_kms.py | 16 ++++---- plugins/modules/aws_kms_info.py | 16 ++++---- plugins/modules/cloudformation_stack_set.py | 2 +- plugins/modules/dms_endpoint.py | 12 +++--- .../modules/dms_replication_subnet_group.py | 10 ++--- plugins/modules/ec2_asg.py | 38 +++++++++---------- plugins/modules/ec2_elb_info.py | 6 +-- plugins/modules/ecs_service_info.py | 4 +- plugins/modules/iam_managed_policy.py | 2 +- plugins/modules/iam_saml_federation.py | 10 ++--- plugins/modules/rds.py | 4 +- 17 files changed, 71 insertions(+), 71 deletions(-) diff --git a/plugins/modules/aws_config_delivery_channel.py b/plugins/modules/aws_config_delivery_channel.py index e6e9d40e62c..fb3851a4ecc 100644 --- a/plugins/modules/aws_config_delivery_channel.py +++ b/plugins/modules/aws_config_delivery_channel.py @@ -79,7 +79,7 @@ # this waits for an IAM role to become fully available, at the cost of # taking a long time to fail when the IAM role/policy really is invalid -retry_unavailable_iam_on_put_delivery = AWSRetry.backoff( +retry_unavailable_iam_on_put_delivery = AWSRetry.jittered_backoff( catch_extra_error_codes=['InsufficientDeliveryPolicyException'], ) diff --git a/plugins/modules/aws_direct_connect_confirm_connection.py b/plugins/modules/aws_direct_connect_confirm_connection.py index 7ea8527db72..b583def09d9 100644 --- a/plugins/modules/aws_direct_connect_confirm_connection.py +++ b/plugins/modules/aws_direct_connect_confirm_connection.py @@ -69,10 +69,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import DirectConnectError from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} +retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def describe_connections(client, params): return client.describe_connections(**params) diff --git a/plugins/modules/aws_direct_connect_connection.py b/plugins/modules/aws_direct_connect_connection.py index 98afd701f3d..3764b1c7802 100644 --- a/plugins/modules/aws_direct_connect_connection.py +++ b/plugins/modules/aws_direct_connect_connection.py @@ -167,7 +167,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.direct_connect import disassociate_connection_and_lag from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} +retry_params = {"retries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]} def connection_status(client, connection_id): @@ -179,7 +179,7 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T if connection_id: params['connectionId'] = connection_id try: - response = 
AWSRetry.backoff(**retry_params)(client.describe_connections)(**params) + response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: if connection_id: msg = "Failed to describe DirectConnect ID {0}".format(connection_id) @@ -227,7 +227,7 @@ def create_connection(client, location, bandwidth, name, lag_id): params['lagId'] = lag_id try: - connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params) + connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name), last_traceback=traceback.format_exc(), @@ -242,7 +242,7 @@ def changed_properties(current_status, location, bandwidth): return current_bandwidth != bandwidth or current_location != location -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def update_associations(client, latest_state, connection_id, lag_id): changed = False if 'lagId' in latest_state and lag_id != latest_state['lagId']: @@ -277,7 +277,7 @@ def ensure_present(client, connection_id, connection_name, location, bandwidth, return False, connection_id -@AWSRetry.backoff(**retry_params) +@AWSRetry.jittered_backoff(**retry_params) def ensure_absent(client, connection_id): changed = False if connection_id: diff --git a/plugins/modules/aws_direct_connect_link_aggregation_group.py b/plugins/modules/aws_direct_connect_link_aggregation_group.py index 7b287bd61f3..0567ba90288 100644 --- a/plugins/modules/aws_direct_connect_link_aggregation_group.py +++ b/plugins/modules/aws_direct_connect_link_aggregation_group.py @@ -265,7 +265,7 @@ def delete_lag(client, lag_id): exception=e) -@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) +@AWSRetry.jittered_backoff(retries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException']) def _update_lag(client, lag_id, lag_name, min_links): params = {} if min_links: diff --git a/plugins/modules/aws_direct_connect_virtual_interface.py b/plugins/modules/aws_direct_connect_virtual_interface.py index d520f0ee84f..d2d199c5527 100644 --- a/plugins/modules/aws_direct_connect_virtual_interface.py +++ b/plugins/modules/aws_direct_connect_virtual_interface.py @@ -267,7 +267,7 @@ def try_except_ClientError(failure_msg): def wrapper(f): def run_func(*args, **kwargs): try: - result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) + result = AWSRetry.jittered_backoff(retries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs) except (ClientError, BotoCoreError) as e: raise DirectConnectError(failure_msg, traceback.format_exc(), e) return result diff --git a/plugins/modules/aws_inspector_target.py b/plugins/modules/aws_inspector_target.py index ceb4abd63dd..a84e245d152 100644 --- a/plugins/modules/aws_inspector_target.py +++ b/plugins/modules/aws_inspector_target.py @@ -110,7 +110,7 @@ pass # caught by AnsibleAWSModule -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def main(): argument_spec = dict( name=dict(required=True), diff --git a/plugins/modules/aws_kms.py b/plugins/modules/aws_kms.py index 05a520ac94a..13bbd7f4619 100644 --- a/plugins/modules/aws_kms.py +++ b/plugins/modules/aws_kms.py @@ -434,19 +434,19 @@ from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_iam_roles_with_backoff(connection): paginator = connection.get_paginator('list_roles') return paginator.paginate().build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): paginator = connection.get_paginator('list_keys') return paginator.paginate().build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): paginator = connection.get_paginator('list_aliases') return paginator.paginate().build_full_result() @@ -465,30 +465,30 @@ def get_kms_aliases_lookup(connection): return _aliases -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_tags_with_backoff(connection, key_id, **kwargs): return connection.list_resource_tags(KeyId=key_id, **kwargs) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id): params = dict(KeyId=key_id) paginator = connection.get_paginator('list_grants') return paginator.paginate(**params).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_metadata_with_backoff(connection, key_id): return connection.describe_key(KeyId=key_id) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): paginator = connection.get_paginator('list_key_policies') return paginator.paginate(KeyId=key_id).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_key_policy_with_backoff(connection, key_id, policy_name): return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) diff --git a/plugins/modules/aws_kms_info.py b/plugins/modules/aws_kms_info.py index 3e606481e15..a7620dad005 100644 --- a/plugins/modules/aws_kms_info.py +++ b/plugins/modules/aws_kms_info.py @@ -261,13 +261,13 @@ _aliases = dict() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): paginator = connection.get_paginator('list_keys') return paginator.paginate().build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): paginator = connection.get_paginator('list_aliases') return paginator.paginate().build_full_result() @@ -286,12 +286,12 @@ def get_kms_aliases_lookup(connection): return _aliases -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_tags_with_backoff(connection, key_id, **kwargs): return connection.list_resource_tags(KeyId=key_id, **kwargs) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id, **kwargs): params = dict(KeyId=key_id) if kwargs.get('tokens'): @@ -300,23 +300,23 @@ def get_kms_grants_with_backoff(connection, key_id, 
**kwargs): return paginator.paginate(**params).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_metadata_with_backoff(connection, key_id): return connection.describe_key(KeyId=key_id) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): paginator = connection.get_paginator('list_key_policies') return paginator.paginate(KeyId=key_id).build_full_result() -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_key_policy_with_backoff(connection, key_id, policy_name): return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name) -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_enable_key_rotation_with_backoff(connection, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) diff --git a/plugins/modules/cloudformation_stack_set.py b/plugins/modules/cloudformation_stack_set.py index b10addf7485..750dceb2bf7 100644 --- a/plugins/modules/cloudformation_stack_set.py +++ b/plugins/modules/cloudformation_stack_set.py @@ -361,7 +361,7 @@ def compare_stack_instances(cfn, stack_set_name, accounts, regions): return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances) -@AWSRetry.backoff(tries=3, delay=4) +@AWSRetry.jittered_backoff(retries=3, delay=4) def stack_set_facts(cfn, stack_set_name): try: ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet'] diff --git a/plugins/modules/dms_endpoint.py b/plugins/modules/dms_endpoint.py index f4ab520903a..6cc3bc3f896 100644 --- a/plugins/modules/dms_endpoint.py +++ b/plugins/modules/dms_endpoint.py @@ -175,10 +175,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -backoff_params = dict(tries=5, delay=1, backoff=1.5) +backoff_params = dict(retries=5, delay=1, backoff=1.5) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_endpoints(connection, endpoint_identifier): """ checks if the endpoint exists """ try: @@ -189,7 +189,7 @@ def describe_endpoints(connection, endpoint_identifier): return {'Endpoints': []} -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def dms_delete_endpoint(client, **params): """deletes the DMS endpoint based on the EndpointArn""" if module.params.get('wait'): @@ -198,19 +198,19 @@ def dms_delete_endpoint(client, **params): return client.delete_endpoint(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def dms_create_endpoint(client, **params): """ creates the DMS endpoint""" return client.create_endpoint(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def dms_modify_endpoint(client, **params): """ updates the endpoint""" return client.modify_endpoint(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def get_endpoint_deleted_waiter(client): return client.get_waiter('endpoint_deleted') diff --git a/plugins/modules/dms_replication_subnet_group.py b/plugins/modules/dms_replication_subnet_group.py index 305b6b5a85d..917f27438ff 100644 --- 
a/plugins/modules/dms_replication_subnet_group.py +++ b/plugins/modules/dms_replication_subnet_group.py @@ -66,10 +66,10 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -backoff_params = dict(tries=5, delay=1, backoff=1.5) +backoff_params = dict(retries=5, delay=1, backoff=1.5) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_subnet_group(connection, subnet_group): """checks if instance exists""" try: @@ -80,18 +80,18 @@ def describe_subnet_group(connection, subnet_group): return {'ReplicationSubnetGroups': []} -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_create(connection, **params): """ creates the replication subnet group """ return connection.create_replication_subnet_group(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_modify(connection, **modify_params): return connection.modify_replication_subnet_group(**modify_params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def replication_subnet_group_delete(module, connection): subnetid = module.params.get('identifier') delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid) diff --git a/plugins/modules/ec2_asg.py b/plugins/modules/ec2_asg.py index 662c23873b1..46cdcbf15b8 100644 --- a/plugins/modules/ec2_asg.py +++ b/plugins/modules/ec2_asg.py @@ -639,21 +639,21 @@ INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') -backoff_params = dict(tries=10, delay=3, backoff=1.5) +backoff_params = dict(retries=10, delay=3, backoff=1.5) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_autoscaling_groups(connection, group_name): pg = connection.get_paginator('describe_auto_scaling_groups') return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', []) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def deregister_lb_instances(connection, lb_name, instance_id): connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)]) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_instance_health(connection, lb_name, instances): params = dict(LoadBalancerName=lb_name) if instances: @@ -661,28 +661,28 @@ def describe_instance_health(connection, lb_name, instances): return connection.describe_instance_health(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_target_health(connection, target_group_arn, instances): return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def suspend_asg_processes(connection, asg_name, processes): connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def resume_asg_processes(connection, asg_name, processes): connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_launch_configurations(connection, 
launch_config_name): pg = connection.get_paginator('describe_launch_configurations') return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def describe_launch_templates(connection, launch_template): if launch_template['launch_template_id'] is not None: try: @@ -698,12 +698,12 @@ def describe_launch_templates(connection, launch_template): module.fail_json(msg="No launch template found matching: %s" % launch_template) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def create_asg(connection, **params): connection.create_auto_scaling_group(**params) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def put_notification_config(connection, asg_name, topic_arn, notification_types): connection.put_notification_configuration( AutoScalingGroupName=asg_name, @@ -712,7 +712,7 @@ def put_notification_config(connection, asg_name, topic_arn, notification_types) ) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def del_notification_config(connection, asg_name, topic_arn): connection.delete_notification_configuration( AutoScalingGroupName=asg_name, @@ -720,37 +720,37 @@ def del_notification_config(connection, asg_name, topic_arn): ) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def attach_load_balancers(connection, asg_name, load_balancers): connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def detach_load_balancers(connection, asg_name, load_balancers): connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def attach_lb_target_groups(connection, asg_name, target_group_arns): connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def detach_lb_target_groups(connection, asg_name, target_group_arns): connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def update_asg(connection, **params): connection.update_auto_scaling_group(**params) -@AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) +@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) def delete_asg(connection, asg_name, force_delete): connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) -@AWSRetry.backoff(**backoff_params) +@AWSRetry.jittered_backoff(**backoff_params) def terminate_asg_instance(connection, instance_id, decrement_capacity): connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, ShouldDecrementDesiredCapacity=decrement_capacity) diff --git a/plugins/modules/ec2_elb_info.py b/plugins/modules/ec2_elb_info.py index add102ab87a..8b207111b60 100644 --- a/plugins/modules/ec2_elb_info.py +++ b/plugins/modules/ec2_elb_info.py @@ -109,7 +109,7 @@ def _get_tags(self, elbname): elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)]) return dict((tag.Key, tag.Value) for tag in elb_tags if 
hasattr(tag, 'Key')) - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def _get_elb_connection(self): return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) @@ -158,7 +158,7 @@ def _get_health_check(self, health_check): health_check_dict['ping_path'] = path return health_check_dict - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def _get_elb_info(self, elb): elb_info = { 'name': elb.name, @@ -202,7 +202,7 @@ def _get_elb_info(self, elb): def list_elbs(self): elb_array, token = [], None - get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) + get_elb_with_backoff = AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers) while True: all_elbs = get_elb_with_backoff(marker=token) token = all_elbs.next_marker diff --git a/plugins/modules/ecs_service_info.py b/plugins/modules/ecs_service_info.py index 9b47b02a714..79332e55702 100644 --- a/plugins/modules/ecs_service_info.py +++ b/plugins/modules/ecs_service_info.py @@ -148,7 +148,7 @@ def __init__(self, module): self.module = module self.ecs = module.client('ecs') - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_services_with_backoff(self, **kwargs): paginator = self.ecs.get_paginator('list_services') try: @@ -156,7 +156,7 @@ def list_services_with_backoff(self, **kwargs): except is_boto3_error_code('ClusterNotFoundException') as e: self.module.fail_json_aws(e, "Could not find cluster to list services") - @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) + @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def describe_services_with_backoff(self, **kwargs): return self.ecs.describe_services(**kwargs) diff --git a/plugins/modules/iam_managed_policy.py b/plugins/modules/iam_managed_policy.py index a56e76d037f..d6cdd33525e 100644 --- a/plugins/modules/iam_managed_policy.py +++ b/plugins/modules/iam_managed_policy.py @@ -141,7 +141,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_policies_with_backoff(iam): paginator = iam.get_paginator('list_policies') return paginator.paginate(Scope='Local').build_full_result() diff --git a/plugins/modules/iam_saml_federation.py b/plugins/modules/iam_saml_federation.py index a78decfe625..4b41f443134 100644 --- a/plugins/modules/iam_saml_federation.py +++ b/plugins/modules/iam_saml_federation.py @@ -123,23 +123,23 @@ def __init__(self, module): self.module.fail_json_aws(e, msg="Unknown boto error") # use retry decorator for boto3 calls - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _list_saml_providers(self): return self.conn.list_saml_providers() - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _get_saml_provider(self, arn): return self.conn.get_saml_provider(SAMLProviderArn=arn) - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _update_saml_provider(self, arn, metadata): return self.conn.update_saml_provider(SAMLProviderArn=arn, SAMLMetadataDocument=metadata) - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _create_saml_provider(self, metadata, name): return 
self.conn.create_saml_provider(SAMLMetadataDocument=metadata, Name=name) - @AWSRetry.backoff(tries=3, delay=5) + @AWSRetry.jittered_backoff(retries=3, delay=5) def _delete_saml_provider(self, arn): return self.conn.delete_saml_provider(SAMLProviderArn=arn) diff --git a/plugins/modules/rds.py b/plugins/modules/rds.py index a59b183925b..bfbf0019f6b 100644 --- a/plugins/modules/rds.py +++ b/plugins/modules/rds.py @@ -943,13 +943,13 @@ def await_resource(conn, resource, status, module): if resource.name is None: module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) # Back off if we're getting throttled, since we're just waiting anyway - resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name) + resource = AWSRetry.jittered_backoff(retries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name) else: # Temporary until all the rds2 commands have their responses parsed if resource.name is None: module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) # Back off if we're getting throttled, since we're just waiting anyway - resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name) + resource = AWSRetry.jittered_backoff(retries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name) if resource is None: break # Some RDS resources take much longer than others to be ready. Check
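
The pattern applied throughout this patch is a one-for-one swap of the deprecated AWSRetry.backoff decorator for AWSRetry.jittered_backoff, renaming tries to retries while keeping delay, backoff and catch_extra_error_codes unchanged. Below is a minimal sketch of the resulting usage, assuming boto3 and the amazon.aws collection's module_utils are importable; the client, retry parameters and error code simply mirror the aws_direct_connect_confirm_connection.py hunk above and are illustrative, not prescriptive.

    # Sketch only: mirrors the decorator usage introduced by this patch.
    # Assumes boto3 and ansible_collections.amazon.aws are installed.
    import boto3

    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry

    # Old style (deprecated): AWSRetry.backoff(tries=10, delay=5, backoff=1.2, ...)
    # New style: same keyword arguments, with 'tries' renamed to 'retries'.
    retry_params = {
        "retries": 10,
        "delay": 5,
        "backoff": 1.2,
        "catch_extra_error_codes": ["DirectConnectClientException"],
    }

    @AWSRetry.jittered_backoff(**retry_params)
    def describe_connections(client, **params):
        # The wrapped call is retried with exponential backoff plus jitter when
        # AWS throttles the request or returns one of the extra error codes.
        return client.describe_connections(**params)

    if __name__ == "__main__":
        # Illustrative invocation against the Direct Connect API.
        client = boto3.client("directconnect")
        print(describe_connections(client))

Compared with the plain exponential backoff of the deprecated decorator, jittered_backoff randomises each retry delay, which spreads retries out when many callers are throttled at the same time.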