diff --git a/autoscaling_group_info.py b/autoscaling_group_info.py
index 4db9ac26a37..e8ec13a12ba 100644
--- a/autoscaling_group_info.py
+++ b/autoscaling_group_info.py
@@ -381,7 +381,7 @@ def find_asgs(conn, module, name=None, tags=None):

     try:
         elbv2 = module.client('elbv2')
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
         # This is nice to have, not essential
         elbv2 = None
     matched_asgs = []
@@ -409,7 +409,7 @@ def find_asgs(conn, module, name=None, tags=None):
             # workaround for https://github.com/ansible/ansible/pull/25015
             if 'target_group_ar_ns' in asg:
                 asg['target_group_arns'] = asg['target_group_ar_ns']
-                del(asg['target_group_ar_ns'])
+                del asg['target_group_ar_ns']
             if asg.get('target_group_arns'):
                 if elbv2:
                     try:
diff --git a/cloudfront_distribution.py b/cloudfront_distribution.py
index 2b58ac1e888..a2d439c7d93 100644
--- a/cloudfront_distribution.py
+++ b/cloudfront_distribution.py
@@ -1676,7 +1676,7 @@ def validate_origins(self, client, config, origins, default_origin_domain_name,
             if purge_origins:
                 for domain in list(all_origins.keys()):
                     if domain not in new_domains:
-                        del(all_origins[domain])
+                        del all_origins[domain]
             return ansible_list_to_cloudfront_list(list(all_origins.values()))
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating distribution origins")
@@ -1694,7 +1694,7 @@ def validate_s3_origin_configuration(self, client, existing_config, origin):
             cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
                                                                           Comment=comment))
             oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
-        except Exception as e:
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
             self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
         return "origin-access-identity/cloudfront/%s" % oai

@@ -1717,7 +1717,7 @@ def validate_origin(self, client, existing_config, origin, default_origin_path):
             else:
                 s3_origin_config = None

-            del(origin["s3_origin_access_identity_enabled"])
+            del origin["s3_origin_access_identity_enabled"]

             if s3_origin_config:
                 oai = s3_origin_config
@@ -1766,7 +1766,7 @@ def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors):
                 all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
             if purge_cache_behaviors:
                 for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
-                    del(all_cache_behaviors[target_origin_id])
+                    del all_cache_behaviors[target_origin_id]
             return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
         except Exception as e:
             self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
@@ -1954,7 +1954,7 @@ def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
                 if 'response_code' in custom_error_response:
                     custom_error_response['response_code'] = str(custom_error_response['response_code'])
                 if custom_error_response['error_code'] in existing_responses:
-                    del(existing_responses[custom_error_response['error_code']])
+                    del existing_responses[custom_error_response['error_code']]
                 result.append(custom_error_response)
             if not purge_custom_error_responses:
                 result.extend(existing_responses.values())
@@ -2261,7 +2261,7 @@ def main():

     if 'distribution_config' in result:
         result.update(result['distribution_config'])
-        del(result['distribution_config'])
+        del result['distribution_config']

     module.exit_json(changed=changed, **result)
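Reviewer note: the del(...) changes above (and throughout this diff) are purely cosmetic. del is a statement, not a function, so the parentheses are just a redundant grouping around the target expression. A minimal standalone sketch, with a hypothetical dictionary rather than anything from the modules:

    origins = {"a.example.com": 1, "b.example.com": 2}

    # Statement form, as the diff prefers:
    del origins["a.example.com"]

    # del(origins["b.example.com"]) would behave identically, but reads
    # misleadingly like a function call.
    print(origins)  # {'b.example.com': 2}
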
diff --git a/cloudfront_origin_access_identity.py b/cloudfront_origin_access_identity.py
index 9fc83f64820..dc79c9bd1b2 100644
--- a/cloudfront_origin_access_identity.py
+++ b/cloudfront_origin_access_identity.py
@@ -256,8 +256,7 @@ def main():
         else:
             result = service_mgr.create_origin_access_identity(caller_reference, comment)
             changed = True
-    elif(state == 'absent' and origin_access_identity_id is not None and
-         e_tag is not None):
+    elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None:
         result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
         changed = True

diff --git a/cloudtrail.py b/cloudtrail.py
index df95d5bfb7b..aa3b637cee5 100644
--- a/cloudtrail.py
+++ b/cloudtrail.py
@@ -266,7 +266,7 @@ def get_kms_key_aliases(module, client, keyId):
     """
     try:
         key_resp = client.list_aliases(KeyId=keyId)
-    except (BotoCoreError, ClientError) as err:
+    except (BotoCoreError, ClientError):
         # Don't fail here, just return [] to maintain backwards compat
         # in case user doesn't have kms:ListAliases permissions
         return []
@@ -558,9 +558,7 @@ def main():
             # all aliases for a match.
             initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id)
             for a in initial_aliases:
-                if(a['AliasName'] == new_key or
-                   a['AliasArn'] == new_key or
-                   a['TargetKeyId'] == new_key):
+                if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key:
                     results['changed'] = False

     # Check if we need to start/stop logging
diff --git a/ec2_vpc_nacl.py b/ec2_vpc_nacl.py
index 9968e2929ff..03cdef89c39 100644
--- a/ec2_vpc_nacl.py
+++ b/ec2_vpc_nacl.py
@@ -235,7 +235,6 @@ def tags_changed(nacl_id, client, module):
     tags = module.params.get('tags')
     name = module.params.get('name')
     purge_tags = module.params.get('purge_tags')
-    changed = False

     if name is None and tags is None:
         return False
@@ -337,14 +336,14 @@ def setup_network_acl(client, module):
         replace_network_acl_association(nacl_id, subnets, client, module)
         construct_acl_entries(nacl, client, module)
         changed = True
-        return(changed, nacl['NetworkAcl']['NetworkAclId'])
+        return changed, nacl['NetworkAcl']['NetworkAclId']
     else:
         changed = False
         nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
         changed |= subnets_changed(nacl, client, module)
         changed |= nacls_changed(nacl, client, module)
         changed |= tags_changed(nacl_id, client, module)
-        return (changed, nacl_id)
+        return changed, nacl_id


 def remove_network_acl(client, module):
diff --git a/eks_fargate_profile.py b/eks_fargate_profile.py
index 72164a36fea..4eae0983acc 100644
--- a/eks_fargate_profile.py
+++ b/eks_fargate_profile.py
@@ -185,7 +185,7 @@ def validate_tags(client, module, fargate_profile):
     try:
         existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags']
         tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get('purge_tags'))
-    except(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg='Unable to list or compare tags for Fargate Profile %s' % module.params.get('name'))

     if tags_to_remove:
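Reviewer note: several hunks above drop an unused `as e`/`as err` binding or the parentheses around an if/elif condition. Both are safe: the tuple of exception types is still caught, and parentheses around a one-line boolean expression change nothing. A small sketch of both patterns, using built-in exceptions in place of the botocore ones:

    def lookup(table, key):
        try:
            return table[key]
        except (KeyError, TypeError):  # nothing uses the exception object, so no 'as e'
            return None

    state = 'absent'
    e_tag = 'abc123'
    if state == 'absent' and e_tag is not None:  # no wrapping parentheses needed
        print(lookup({'a': 1}, 'a'))  # 1
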
diff --git a/redshift.py b/redshift.py
index ca3e1a45052..475c2101a5e 100644
--- a/redshift.py
+++ b/redshift.py
@@ -453,7 +453,7 @@ def create_cluster(module, redshift):
         changed = True
         resource = _describe_cluster(redshift, identifier)

-    return(changed, _collect_facts(resource))
+    return changed, _collect_facts(resource)


 def describe_cluster(module, redshift):
@@ -470,7 +470,7 @@ def describe_cluster(module, redshift):
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
         module.fail_json_aws(e, msg="Error describing cluster")

-    return(True, _collect_facts(resource))
+    return True, _collect_facts(resource)


 def delete_cluster(module, redshift):
@@ -499,7 +499,7 @@ def delete_cluster(module, redshift):
             ClusterIdentifier=identifier,
             **snake_dict_to_camel_dict(params, capitalize_first=True))
     except is_boto3_error_code('ClusterNotFound'):
-        return(False, {})
+        return False, {}
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="Failed to delete cluster")

@@ -514,7 +514,7 @@ def delete_cluster(module, redshift):
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             module.fail_json_aws(e, msg="Timeout deleting the cluster")

-    return(True, {})
+    return True, {}


 def modify_cluster(module, redshift):
@@ -528,9 +528,6 @@ def modify_cluster(module, redshift):
     identifier = module.params.get('identifier')
     wait = module.params.get('wait')
     wait_timeout = module.params.get('wait_timeout')
-    tags = module.params.get('tags')
-    purge_tags = module.params.get('purge_tags')
-    region = region = module.params.get('region')

     # Package up the optional parameters
     params = {}
@@ -594,7 +591,7 @@ def modify_cluster(module, redshift):
     if _ensure_tags(redshift, identifier, resource['Tags'], module):
         resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]

-    return(True, _collect_facts(resource))
+    return True, _collect_facts(resource)


 def main():
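Reviewer note: return, like del, is a statement. In `return changed, facts` the comma builds the tuple, so `return(changed, facts)` yields the same value with misleading call-like syntax. A toy sketch with made-up names:

    def modify(identifier):
        changed = True
        facts = {'identifier': identifier, 'status': 'available'}
        return changed, facts  # identical value to return(changed, facts)

    changed, facts = modify('demo-cluster')
    print(changed, facts['status'])  # True available
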
diff --git a/s3_bucket_info.py b/s3_bucket_info.py
index d164fde5d16..81e1cd7217b 100644
--- a/s3_bucket_info.py
+++ b/s3_bucket_info.py
@@ -444,7 +444,7 @@ def get_bucket_list(module, connection, name="", name_filter=""):
         final_buckets = filtered_buckets
     else:
         final_buckets = buckets
-    return(final_buckets)
+    return final_buckets


 def get_buckets_facts(connection, buckets, requested_facts, transform_location):
@@ -457,7 +457,7 @@ def get_buckets_facts(connection, buckets, requested_facts, transform_location):
         bucket.update(get_bucket_details(connection, bucket['name'], requested_facts, transform_location))
         full_bucket_list.append(bucket)

-    return(full_bucket_list)
+    return full_bucket_list


 def get_bucket_details(connection, name, requested_facts, transform_location):
@@ -490,7 +490,7 @@ def get_bucket_details(connection, name, requested_facts, transform_location):
             except botocore.exceptions.ClientError:
                 pass

-    return(all_facts)
+    return all_facts


 @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
@@ -508,11 +508,8 @@ def get_bucket_location(name, connection, transform_location=False):
     except KeyError:
         pass
     # Strip response metadata (not needed)
-    try:
-        data.pop('ResponseMetadata')
-        return(data)
-    except KeyError:
-        return(data)
+    data.pop('ResponseMetadata', None)
+    return data


 @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
@@ -524,14 +521,11 @@ def get_bucket_tagging(name, connection):

     try:
         bucket_tags = boto3_tag_list_to_ansible_dict(data['TagSet'])
-        return(bucket_tags)
+        return bucket_tags
     except KeyError:
         # Strip response metadata (not needed)
-        try:
-            data.pop('ResponseMetadata')
-            return(data)
-        except KeyError:
-            return(data)
+        data.pop('ResponseMetadata', None)
+        return data


 @AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
@@ -544,11 +538,8 @@ def get_bucket_property(name, connection, get_api_name):
     api_function = getattr(connection, get_api_name)
     data = api_function(Bucket=name)
     # Strip response metadata (not needed)
-    try:
-        data.pop('ResponseMetadata')
-        return(data)
-    except KeyError:
-        return(data)
+    data.pop('ResponseMetadata', None)
+    return data


 def main():
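Reviewer note: the s3_bucket_info.py hunks collapse a five-line try/except KeyError into dict.pop(key, default), which removes the key when present and is a no-op otherwise. A minimal sketch with a made-up response dict:

    data = {'LocationConstraint': 'eu-west-1',
            'ResponseMetadata': {'HTTPStatusCode': 200}}

    # Equivalent to: try: data.pop('ResponseMetadata') / except KeyError: pass
    data.pop('ResponseMetadata', None)
    print(data)  # {'LocationConstraint': 'eu-west-1'}

    data.pop('ResponseMetadata', None)  # safe to repeat; the default suppresses KeyError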