From c192261764201ebd3dabd816ac219a08a4555d34 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 4 Feb 2020 15:56:09 -0500 Subject: [PATCH] resource/aws_s3_bucket: Retry read after creation for 404 status code Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/11891 Previously in the acceptance testing (inconsistently across various testing due to eventual consistency): ``` --- FAIL: TestAccAWSAthenaNamedQuery_basic (3.56s) testing.go:640: Step 0 error: errors during apply: Error: Provider produced inconsistent result after apply When applying changes to aws_s3_bucket.test, provider "aws" produced an unexpected new value for was present, but now absent. This is a bug in the provider, which should be reported in the provider's own issue tracker. --- FAIL: TestAccAWSCodeBuildProject_Environment_Certificate (7.72s) testing.go:640: Step 0 error: errors during apply: Error: Provider produced inconsistent result after apply When applying changes to aws_s3_bucket.test, provider "aws" produced an unexpected new value for was present, but now absent. This is a bug in the provider, which should be reported in the provider's own issue tracker. ``` The HeadBucket/HeadObject S3 APIs work differently than other AWS APIs where they can exclusively return only status code information and not a relevant error code. This update accounts for that discrepancy by retrying on 404 status codes on the resource read immediately after bucket creation. 
Output from acceptance testing (failures from other eventual consistency issues): ``` --- PASS: TestAccAWSS3Bucket_shouldFailNotFound (18.57s) --- PASS: TestAccAWSS3Bucket_Cors_Delete (31.55s) --- PASS: TestAccAWSS3Bucket_forceDestroyWithEmptyPrefixes (33.16s) --- PASS: TestAccAWSS3Bucket_forceDestroy (34.90s) --- PASS: TestAccAWSS3Bucket_forceDestroyWithObjectLockEnabled (36.16s) --- PASS: TestAccAWSS3Bucket_basic (36.41s) --- PASS: TestAccAWSS3Bucket_enableDefaultEncryption_whenAES256IsUsed (36.72s) --- PASS: TestAccAWSS3Bucket_Cors_EmptyOrigin (37.42s) --- PASS: TestAccAWSS3Bucket_ReplicationExpectVersioningValidationError (39.28s) --- PASS: TestAccAWSS3Bucket_objectLock (62.84s) --- FAIL: TestAccAWSS3Bucket_Cors_Update (70.43s) testing.go:640: Step 2 error: After applying this step, the plan was not empty: --- PASS: TestAccAWSS3Bucket_namePrefix (34.06s) --- PASS: TestAccAWSS3Bucket_ReplicationWithoutStorageClass (74.89s) --- PASS: TestAccAWSS3Bucket_Logging (75.40s) --- PASS: TestAccAWSS3Bucket_ReplicationWithoutPrefix (77.09s) --- PASS: TestAccAWSS3Bucket_disableDefaultEncryption_whenDefaultEncryptionIsEnabled (59.98s) --- PASS: TestAccAWSS3Bucket_LifecycleExpireMarkerOnly (80.69s) --- FAIL: TestAccAWSS3Bucket_RequestPayer (50.88s) testing.go:640: Step 2 error: Check failed: Check 2/3 error: aws_s3_bucket.bucket: Attribute 'request_payer' expected "Requester", got "BucketOwner" --- FAIL: TestAccAWSS3Bucket_acceleration (21.28s) testing.go:640: Step 0 error: Check failed: Check 2/2 error: aws_s3_bucket.bucket: Attribute 'acceleration_status' expected "Enabled", got "" --- FAIL: TestAccAWSS3Bucket_LifecycleBasic (86.20s) testing.go:640: Step 3 error: After applying this step and refreshing, the plan was not empty: --- PASS: TestAccAWSS3Bucket_WebsiteRoutingRules (60.54s) --- PASS: TestAccAWSS3Bucket_enableDefaultEncryption_whenTypical (62.41s) --- PASS: TestAccAWSS3Bucket_UpdateAcl (61.70s) --- PASS: TestAccAWSS3Bucket_region (30.65s) --- PASS: 
TestAccAWSS3Bucket_Versioning (105.29s) --- PASS: TestAccAWSS3Bucket_generatedName (35.22s) --- PASS: TestAccAWSS3Bucket_Bucket_EmptyString (34.39s) --- PASS: TestAccAWSS3Bucket_Policy (76.26s) --- PASS: TestAccAWSS3Bucket_WebsiteRedirect (83.43s) --- PASS: TestAccAWSS3Bucket_Website_Simple (83.85s) --- PASS: TestAccAWSS3Bucket_ReplicationConfiguration_Rule_Destination_AccessControlTranslation (149.59s) --- PASS: TestAccAWSS3Bucket_tagsWithNoSystemTags (112.51s) --- PASS: TestAccAWSS3Bucket_ReplicationSchemaV2 (234.41s) --- PASS: TestAccAWSS3Bucket_Replication (242.83s) --- PASS: TestAccAWSS3Bucket_tagsWithSystemTags (182.05s) ``` Re-run of failed tests: ``` --- PASS: TestAccAWSS3Bucket_RequestPayer (59.87s) --- PASS: TestAccAWSS3Bucket_Cors_Update (61.90s) --- PASS: TestAccAWSS3Bucket_acceleration (62.81s) --- PASS: TestAccAWSS3Bucket_LifecycleBasic (89.19s) ``` --- aws/awserr.go | 13 +++++++++++ aws/resource_aws_s3_bucket.go | 42 ++++++++++++++++++++++++++--------- 2 files changed, 45 insertions(+), 10 deletions(-) diff --git a/aws/awserr.go b/aws/awserr.go index 0dc343bf36b..8c2be2c7502 100644 --- a/aws/awserr.go +++ b/aws/awserr.go @@ -21,6 +21,19 @@ func isAWSErr(err error, code string, message string) bool { return false } +// Returns true if the error matches all these conditions: +// * err is of type awserr.RequestFailure +// * RequestFailure.StatusCode() matches status code +// It is always preferable to use isAWSErr() except in older APIs (e.g. S3) +// that sometimes only respond with status codes. 
+func isAWSErrRequestFailureStatusCode(err error, statusCode int) bool { + var awsErr awserr.RequestFailure + if errors.As(err, &awsErr) { + return awsErr.StatusCode() == statusCode + } + return false +} + func retryOnAwsCode(code string, f func() (interface{}, error)) (interface{}, error) { var resp interface{} err := resource.Retry(2*time.Minute, func() *resource.RetryError { diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 02002eb4e40..2af7dbb7726 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -22,6 +22,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) +const s3BucketCreationTimeout = 2 * time.Minute + func resourceAwsS3Bucket() *schema.Resource { return &schema.Resource{ Create: resourceAwsS3BucketCreate, @@ -757,19 +759,39 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn - var err error + input := &s3.HeadBucketInput{ + Bucket: aws.String(d.Id()), + } - _, err = retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return s3conn.HeadBucket(&s3.HeadBucketInput{ - Bucket: aws.String(d.Id()), - }) + err := resource.Retry(s3BucketCreationTimeout, func() *resource.RetryError { + _, err := s3conn.HeadBucket(input) + + if d.IsNewResource() && isAWSErrRequestFailureStatusCode(err, 404) { + return resource.RetryableError(err) + } + + if d.IsNewResource() && isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil }) + + if isResourceTimeoutError(err) { + _, err = s3conn.HeadBucket(input) + } + + if isAWSErrRequestFailureStatusCode(err, 404) || isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != 
nil { - if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { - log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } return fmt.Errorf("error reading S3 Bucket (%s): %s", d.Id(), err) }