diff --git a/.changelog/34612.txt b/.changelog/34612.txt new file mode 100644 index 00000000000..0bc2c173e9c --- /dev/null +++ b/.changelog/34612.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_s3_directory_bucket +``` + +```release-note:new-data-source +aws_s3_directory_buckets +``` \ No newline at end of file diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml index 71cb9fcfd68..59dab11ec93 100644 --- a/.github/labeler-issue-triage.yml +++ b/.github/labeler-issue-triage.yml @@ -554,7 +554,7 @@ service/route53resolver: service/rum: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_rum_' service/s3: - - '((\*|-)\s*`?|(data|resource)\s+"?)aws_(canonical_user_id|s3_bucket|s3_object)' + - '((\*|-)\s*`?|(data|resource)\s+"?)aws_(canonical_user_id|s3_bucket|s3_object|s3_directory_bucket)' service/s3control: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_(s3_account_|s3control_|s3_access_)' service/s3outposts: diff --git a/.github/labeler-pr-triage.yml b/.github/labeler-pr-triage.yml index 4bbaa9b9f54..8444acc0274 100644 --- a/.github/labeler-pr-triage.yml +++ b/.github/labeler-pr-triage.yml @@ -908,6 +908,7 @@ service/rum: service/s3: - 'internal/service/s3/**/*' - 'website/**/s3_bucket*' + - 'website/**/s3_directory_bucket*' - 'website/**/s3_object*' - 'website/**/canonical_user_id*' service/s3control: diff --git a/go.mod b/go.mod index db379bdde74..6de34051f8d 100644 --- a/go.mod +++ b/go.mod @@ -92,6 +92,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/vpclattice v1.4.5 github.com/aws/aws-sdk-go-v2/service/workspaces v1.34.2 github.com/aws/aws-sdk-go-v2/service/xray v1.22.5 + github.com/aws/smithy-go v1.17.0 github.com/beevik/etree v1.2.0 github.com/davecgh/go-spew v1.1.1 github.com/gertd/go-pluralize v0.2.1 @@ -154,7 +155,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.5 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.17.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.3 // indirect - github.com/aws/smithy-go v1.17.0 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/boombuler/barcode v1.0.1 // indirect github.com/bufbuild/protocompile v0.6.0 // indirect diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 70e41d09098..ac5184bff25 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -23,6 +23,7 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" + smithy "github.com/aws/smithy-go" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -33,6 +34,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -95,7 +97,10 @@ func ResourceBucket() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"bucket_prefix"}, - ValidateFunc: validation.StringLenBetween(0, 63), + ValidateFunc: validation.All( + validation.StringLenBetween(0, 63), + validation.StringDoesNotMatch(directoryBucketNameRegex, `must not be in the 
format [bucket_name]--[azid]--x-s3. Use the aws_s3_directory_bucket resource to manage S3 Express buckets`), + ), }, "bucket_domain_name": { Type: schema.TypeString, @@ -107,7 +112,9 @@ func ResourceBucket() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"bucket"}, - ValidateFunc: validation.StringLenBetween(0, 63-id.UniqueIDSuffixLength), + ValidateFunc: validation.All( + validation.StringLenBetween(0, 63-id.UniqueIDSuffixLength), + ), }, "bucket_regional_domain_name": { Type: schema.TypeString, @@ -1428,12 +1435,12 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta inte return nil } -func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string) error { +func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string, optFns ...func(*s3_sdkv2.Options)) error { input := &s3_sdkv2.HeadBucketInput{ Bucket: aws_sdkv2.String(bucket), } - _, err := conn.HeadBucket(ctx, input) + _, err := conn.HeadBucket(ctx, input, optFns...) if tfawserr_sdkv2.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) || tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchBucket) { return &retry.NotFoundError{ @@ -1442,6 +1449,18 @@ func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string) error } } + // FIXME Move to aws-sdk-go-base + // FIXME &smithy.OperationError{ServiceID:"S3", OperationName:"HeadBucket", Err:(*errors.errorString)(0xc00202bb60)} + // FIXME "operation error S3: HeadBucket, get identity: get credentials: operation error S3: CreateSession, https response error StatusCode: 404, RequestID: 0033eada6b00018c17de82890509d9eada65ba39, HostID: F31dBn, NoSuchBucket:" + if operationErr, ok := errs.As[*smithy.OperationError](err); ok { + if strings.Contains(operationErr.Err.Error(), errCodeNoSuchBucket) { + return &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + } + return err } diff --git a/internal/service/s3/bucket_accelerate_configuration.go b/internal/service/s3/bucket_accelerate_configuration.go index 0a9d32ccdaa..b1499261ac6 100644 --- a/internal/service/s3/bucket_accelerate_configuration.go +++ b/internal/service/s3/bucket_accelerate_configuration.go @@ -74,6 +74,10 @@ func resourceBucketAccelerateConfigurationCreate(ctx context.Context, d *schema. 
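For context, the hunks that follow lean on two helpers that live elsewhere in the package and do not appear in this diff: `directoryBucketNameRegex` (used by the `bucket` validator above) and `errDirectoryBucket` (called after each `PutBucket*` request below). A minimal sketch of plausible shapes, inferred only from the `[bucket_name]--[azid]--x-s3` format named in the validation message and the `directory buckets are not supported` string the acceptance tests match on:

```go
// Sketch only: neither helper is defined in this section, so both shapes are
// assumptions inferred from their call sites in this PR.
package s3

import (
	"fmt"

	"github.com/YakDriver/regexache"
)

// Matches the directory bucket naming format the validator above rejects,
// [bucket_name]--[azid]--x-s3. The exact pattern is a guess.
var directoryBucketNameRegex = regexache.MustCompile(`^[0-9a-z.-]+--[0-9a-z-]+--x-s3$`)

// errDirectoryBucket wraps the underlying AWS error in the stable message
// that every _directoryBucket acceptance test below matches on.
func errDirectoryBucket(err error) error {
	return fmt.Errorf("directory buckets are not supported: %w", err)
}
```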
return conn.PutBucketAccelerateConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "AccelerateConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Accelerate Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_accelerate_configuration_test.go b/internal/service/s3/bucket_accelerate_configuration_test.go index 6e87a810481..f998f3e8655 100644 --- a/internal/service/s3/bucket_accelerate_configuration_test.go +++ b/internal/service/s3/bucket_accelerate_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -168,6 +169,24 @@ func TestAccS3BucketAccelerateConfiguration_migrate_withChange(t *testing.T) { }) } +func TestAccS3BucketAccelerateConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketAccelerateConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketAccelerateConfigurationConfig_directoryBucket(bucketName, string(types.BucketAccelerateStatusEnabled)), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketAccelerateConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -231,3 +250,20 @@ resource "aws_s3_bucket_accelerate_configuration" "test" { } `, bucketName, status) } + +func testAccBucketAccelerateConfigurationConfig_directoryBucket(bucketName, status string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(bucketName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_accelerate_configuration" "test" { + bucket = aws_s3_directory_bucket.test.id + status = %[1]q +} +`, status)) +} diff --git a/internal/service/s3/bucket_acl.go b/internal/service/s3/bucket_acl.go index 5b4d7a5121a..feb63829d38 100644 --- a/internal/service/s3/bucket_acl.go +++ b/internal/service/s3/bucket_acl.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "log" + "net/http" "strings" "github.com/YakDriver/regexache" @@ -158,6 +159,10 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta i return conn.PutBucketAcl(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) ACL: %s", bucket, err) } diff --git a/internal/service/s3/bucket_acl_test.go b/internal/service/s3/bucket_acl_test.go index b6fae42eeab..23788b5c07d 100644 --- a/internal/service/s3/bucket_acl_test.go +++ b/internal/service/s3/bucket_acl_test.go @@ -11,7 +11,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/aws-sdk-go/service/s3" 
sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -87,22 +86,22 @@ func TestBucketACLParseResourceID(t *testing.T) { }, { TestName: "valid ID with bucket and acl", - InputID: tfs3.BucketACLCreateResourceID("example", "", s3.BucketCannedACLPrivate), - ExpectedACL: s3.BucketCannedACLPrivate, + InputID: tfs3.BucketACLCreateResourceID("example", "", string(types.BucketCannedACLPrivate)), + ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("example", "", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("example", "", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket that has dot, hyphen, and number and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", "", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", "", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "my-example.bucket.4000", ExpectedBucketOwner: "", }, @@ -122,22 +121,22 @@ func TestBucketACLParseResourceID(t *testing.T) { }, { TestName: "valid ID with bucket, bucket owner, and acl", - InputID: tfs3.BucketACLCreateResourceID("example", "123456789012", s3.BucketCannedACLPrivate), - ExpectedACL: s3.BucketCannedACLPrivate, + InputID: tfs3.BucketACLCreateResourceID("example", "123456789012", string(types.BucketCannedACLPrivate)), + ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "example", ExpectedBucketOwner: "123456789012", }, { TestName: "valid ID with bucket, bucket owner, and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("example", "123456789012", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("example", "123456789012", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "example", ExpectedBucketOwner: "123456789012", }, { TestName: "valid ID with bucket that has dot, hyphen, and numbers, bucket owner, and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", "123456789012", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", "123456789012", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "my-example.bucket.4000", ExpectedBucketOwner: "123456789012", }, @@ -171,22 +170,22 @@ func TestBucketACLParseResourceID(t *testing.T) { }, { TestName: "valid ID with bucket (pre-2018, us-east-1) and acl", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", "", s3.BucketCannedACLPrivate), - ExpectedACL: s3.BucketCannedACLPrivate, + InputID: tfs3.BucketACLCreateResourceID("Example", "", string(types.BucketCannedACLPrivate)), + ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: 
"Example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) and acl that has underscores", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example_Bucket", "", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("My_Example_Bucket", "", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "My_Example_Bucket", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dot, hyphen, and number and acl that has hyphens", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-Bucket.4000", "", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("My_Example-Bucket.4000", "", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "My_Example-Bucket.4000", ExpectedBucketOwner: "", }, @@ -206,22 +205,22 @@ func TestBucketACLParseResourceID(t *testing.T) { }, { TestName: "valid ID with bucket (pre-2018, us-east-1), bucket owner, and acl", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", "123456789012", s3.BucketCannedACLPrivate), - ExpectedACL: s3.BucketCannedACLPrivate, + InputID: tfs3.BucketACLCreateResourceID("Example", "123456789012", string(types.BucketCannedACLPrivate)), + ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "Example", ExpectedBucketOwner: "123456789012", }, { TestName: "valid ID with bucket (pre-2018, us-east-1), bucket owner, and acl that has hyphens", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", "123456789012", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("Example", "123456789012", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "Example", ExpectedBucketOwner: "123456789012", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dot, hyphen, and numbers, bucket owner, and acl that has hyphens", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-bucket.4000", "123456789012", s3.BucketCannedACLPublicReadWrite), - ExpectedACL: s3.BucketCannedACLPublicReadWrite, + InputID: tfs3.BucketACLCreateResourceID("My_Example-bucket.4000", "123456789012", string(types.BucketCannedACLPublicReadWrite)), + ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "My_Example-bucket.4000", ExpectedBucketOwner: "123456789012", }, @@ -269,16 +268,16 @@ func TestAccS3BucketACL_basic(t *testing.T) { CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketACLConfig_basic(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + resource.TestCheckResourceAttr(resourceName, "acl", string(types.BucketCannedACLPrivate)), resource.TestCheckResourceAttr(resourceName, "access_control_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.owner.#", "1"), 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ "grantee.#": "1", - "grantee.0.type": s3.TypeCanonicalUser, - "permission": s3.PermissionFullControl, + "grantee.0.type": string(types.TypeCanonicalUser), + "permission": string(types.PermissionFullControl), }), ), }, @@ -303,7 +302,7 @@ func TestAccS3BucketACL_disappears(t *testing.T) { CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketACLConfig_basic(bucketName, s3.BucketCannedACLPrivate), + Config: testAccBucketACLConfig_basic(bucketName, string(types.BucketCannedACLPrivate)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketACLExists(ctx, resourceName), // Bucket ACL cannot be destroyed, but we can verify Bucket deletion @@ -599,6 +598,24 @@ func TestAccS3BucketACL_grantToACL(t *testing.T) { }) } +func TestAccS3BucketACL_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccBucketACLConfig_directoryBucket(bucketName, string(types.BucketCannedACLPrivate)), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketACLExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -810,3 +827,20 @@ resource "aws_s3_bucket_acl" "test" { } `, rName) } + +func testAccBucketACLConfig_directoryBucket(rName, acl string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_directory_bucket.test.id + acl = %[1]q +} +`, acl)) +} diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index 4c15b36d9b5..ea13d406a7c 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -158,6 +158,10 @@ func resourceBucketAnalyticsConfigurationPut(ctx context.Context, d *schema.Reso return conn.PutBucketAnalyticsConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "AnalyticsConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Analytics Configuration (%s): %s", bucket, name, err) } diff --git a/internal/service/s3/bucket_analytics_configuration_test.go b/internal/service/s3/bucket_analytics_configuration_test.go index bfa16612ed9..1db8ea0513b 100644 --- a/internal/service/s3/bucket_analytics_configuration_test.go +++ b/internal/service/s3/bucket_analytics_configuration_test.go @@ -463,6 +463,24 @@ func TestAccS3BucketAnalyticsConfiguration_WithStorageClassAnalysis_full(t *test }) } +func TestAccS3BucketAnalyticsConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketAnalyticsConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketAnalyticsConfigurationConfig_directoryBucket(rName, rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketAnalyticsConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -717,3 +735,20 @@ resource "aws_s3_bucket" "destination" { } `, name, prefix, bucket) } + +func testAccBucketAnalyticsConfigurationConfig_directoryBucket(bucket, name string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(bucket), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_analytics_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + name = %[1]q +} +`, name)) +} diff --git a/internal/service/s3/bucket_cors_configuration.go b/internal/service/s3/bucket_cors_configuration.go index f4a418da9f5..b3801e59708 100644 --- a/internal/service/s3/bucket_cors_configuration.go +++ b/internal/service/s3/bucket_cors_configuration.go @@ -107,6 +107,10 @@ func resourceBucketCorsConfigurationCreate(ctx context.Context, d *schema.Resour return conn.PutBucketCors(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "CORSConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) CORS Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_cors_configuration_test.go b/internal/service/s3/bucket_cors_configuration_test.go index 825ba616c33..3be6840e4f0 100644 --- a/internal/service/s3/bucket_cors_configuration_test.go +++ b/internal/service/s3/bucket_cors_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -311,6 +312,24 @@ func TestAccS3BucketCORSConfiguration_migrate_corsRuleWithChange(t *testing.T) { }) } +func TestAccS3BucketCORSConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketCORSConfigurationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketCORSConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -459,3 +478,24 @@ resource "aws_s3_bucket_cors_configuration" "test" { } `, rName) } + +func testAccBucketCORSConfigurationConfig_directoryBucket(rName string) string { + return 
acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_cors_configuration" "test" { + bucket = aws_s3_directory_bucket.test.id + + cors_rule { + allowed_methods = ["PUT"] + allowed_origins = ["https://www.example.com"] + } +} +`) +} diff --git a/internal/service/s3/bucket_intelligent_tiering_configuration.go b/internal/service/s3/bucket_intelligent_tiering_configuration.go index 4d57c935637..ac19a77b5cb 100644 --- a/internal/service/s3/bucket_intelligent_tiering_configuration.go +++ b/internal/service/s3/bucket_intelligent_tiering_configuration.go @@ -123,6 +123,10 @@ func resourceBucketIntelligentTieringConfigurationPut(ctx context.Context, d *sc return conn.PutBucketIntelligentTieringConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "IntelligentTieringConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Intelligent-Tiering Configuration (%s): %s", bucket, name, err) } diff --git a/internal/service/s3/bucket_intelligent_tiering_configuration_test.go b/internal/service/s3/bucket_intelligent_tiering_configuration_test.go index 9cb32d606e4..69608608c26 100644 --- a/internal/service/s3/bucket_intelligent_tiering_configuration_test.go +++ b/internal/service/s3/bucket_intelligent_tiering_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -195,6 +196,24 @@ func TestAccS3BucketIntelligentTieringConfiguration_Filter(t *testing.T) { }) } +func TestAccS3BucketIntelligentTieringConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketIntelligentTieringConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketIntelligentTieringConfigurationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketIntelligentTieringConfigurationExists(ctx context.Context, n string, v *types.IntelligentTieringConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -404,3 +423,25 @@ resource "aws_s3_bucket" "test" { } `, rName) } + +func testAccBucketIntelligentTieringConfigurationConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_intelligent_tiering_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + name = %[1]q + + tiering { + access_tier = "DEEP_ARCHIVE_ACCESS" + days = 180 + } +} +`, rName)) +} diff --git a/internal/service/s3/bucket_inventory.go b/internal/service/s3/bucket_inventory.go index 
79058d9be34..38d89603718 100644 --- a/internal/service/s3/bucket_inventory.go +++ b/internal/service/s3/bucket_inventory.go @@ -219,6 +219,10 @@ func resourceBucketInventoryPut(ctx context.Context, d *schema.ResourceData, met return conn.PutBucketInventoryConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "InventoryConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Inventory: %s", bucket, err) } diff --git a/internal/service/s3/bucket_inventory_test.go b/internal/service/s3/bucket_inventory_test.go index e83e0bbc2ce..3b592b70a2e 100644 --- a/internal/service/s3/bucket_inventory_test.go +++ b/internal/service/s3/bucket_inventory_test.go @@ -125,6 +125,25 @@ func TestAccS3BucketInventory_encryptWithSSEKMS(t *testing.T) { }) } +func TestAccS3BucketInventory_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + inventoryName := t.Name() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketInventoryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketInventoryConfig_directoryBucket(rName, inventoryName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketInventoryExists(ctx context.Context, n string, v *types.InventoryConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -283,3 +302,46 @@ resource "aws_s3_bucket_inventory" "test" { } `, bucketName, inventoryName)) } + +func testAccBucketInventoryConfig_directoryBucket(bucketName, inventoryName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(bucketName), fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_inventory" "test" { + bucket = aws_s3_directory_bucket.test.id + name = %[1]q + + included_object_versions = "All" + + optional_fields = [ + "Size", + "LastModifiedDate", + ] + + filter { + prefix = "documents/" + } + + schedule { + frequency = "Weekly" + } + + destination { + bucket { + format = "ORC" + bucket_arn = aws_s3_directory_bucket.test.arn + account_id = data.aws_caller_identity.current.account_id + prefix = "inventory" + } + } +} +`, inventoryName)) +} diff --git a/internal/service/s3/bucket_lifecycle_configuration.go b/internal/service/s3/bucket_lifecycle_configuration.go index caf78a69289..10018f1340c 100644 --- a/internal/service/s3/bucket_lifecycle_configuration.go +++ b/internal/service/s3/bucket_lifecycle_configuration.go @@ -274,6 +274,10 @@ func resourceBucketLifecycleConfigurationCreate(ctx context.Context, d *schema.R return conn.PutBucketLifecycleConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "LifecycleConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Lifecycle Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go 
b/internal/service/s3/bucket_lifecycle_configuration_test.go index b6f46424dc4..515dab961ef 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -9,7 +9,9 @@ import ( "testing" "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -1028,6 +1030,24 @@ func TestAccS3BucketLifecycleConfiguration_Update_filterWithAndToFilterWithPrefi }) } +func TestAccS3BucketLifecycleConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketLifecycleConfigurationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketLifecycleConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -1734,3 +1754,27 @@ resource "aws_s3_bucket_lifecycle_configuration" "test" { } `, rName) } + +func testAccBucketLifecycleConfigurationConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + rule { + id = %[1]q + status = "Enabled" + + expiration { + days = 365 + } + } +} +`, rName)) +} diff --git a/internal/service/s3/bucket_logging.go b/internal/service/s3/bucket_logging.go index 914405be089..677ef9b8d03 100644 --- a/internal/service/s3/bucket_logging.go +++ b/internal/service/s3/bucket_logging.go @@ -166,6 +166,10 @@ func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me return conn.PutBucketLogging(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "BucketLoggingStatus is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Logging: %s", bucket, err) } diff --git a/internal/service/s3/bucket_logging_test.go b/internal/service/s3/bucket_logging_test.go index 54b9de7920c..7b17df25606 100644 --- a/internal/service/s3/bucket_logging_test.go +++ b/internal/service/s3/bucket_logging_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -444,6 +445,24 @@ func TestAccS3BucketLogging_withTargetObjectKeyFormat(t *testing.T) { }) } +func TestAccS3BucketLogging_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketLoggingDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -677,3 +696,19 @@ resource "aws_s3_bucket_logging" "test" { } `) } + +func testAccBucketLoggingConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccBucketLoggingConfig_base(rName), testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + location { + name = local.location_name + } +} +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_directory_bucket.test.bucket + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} +`) +} diff --git a/internal/service/s3/bucket_metric.go b/internal/service/s3/bucket_metric.go index 4af96a792ff..2263fca3bfd 100644 --- a/internal/service/s3/bucket_metric.go +++ b/internal/service/s3/bucket_metric.go @@ -97,6 +97,10 @@ func resourceBucketMetricPut(ctx context.Context, d *schema.ResourceData, meta i return conn.PutBucketMetricsConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "MetricsConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Metric: %s", bucket, err) } diff --git a/internal/service/s3/bucket_metric_test.go b/internal/service/s3/bucket_metric_test.go index 161d1a0a120..bac72fc837c 100644 --- a/internal/service/s3/bucket_metric_test.go +++ b/internal/service/s3/bucket_metric_test.go @@ -312,6 +312,25 @@ func TestAccS3BucketMetric_withFilterSingleTag(t *testing.T) { }) } +func TestAccS3BucketMetric_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + metricName := t.Name() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketMetricDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketMetricConfig_directoryBucket(rName, metricName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketMetricDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -475,3 +494,20 @@ resource "aws_s3_bucket_metric" "test" { } `, metricName)) } + +func testAccBucketMetricConfig_directoryBucket(bucketName, metricName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(bucketName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_metric" "test" { + bucket = 
aws_s3_directory_bucket.test.bucket + name = %[1]q +} +`, metricName)) +} diff --git a/internal/service/s3/bucket_notification.go b/internal/service/s3/bucket_notification.go index e3caf0e5859..d08943afe50 100644 --- a/internal/service/s3/bucket_notification.go +++ b/internal/service/s3/bucket_notification.go @@ -304,6 +304,10 @@ func resourceBucketNotificationPut(ctx context.Context, d *schema.ResourceData, return conn.PutBucketNotificationConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "NotificationConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Notification: %s", bucket, err) } diff --git a/internal/service/s3/bucket_notification_test.go b/internal/service/s3/bucket_notification_test.go index 19b90debad2..92832b6bd89 100644 --- a/internal/service/s3/bucket_notification_test.go +++ b/internal/service/s3/bucket_notification_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -251,6 +252,24 @@ func TestAccS3BucketNotification_update(t *testing.T) { }) } +func TestAccS3BucketNotification_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketNotificationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketNotificationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketNotificationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -731,3 +750,21 @@ resource "aws_s3_bucket_notification" "test" { } `, rName) } + +func testAccBucketNotificationConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_notification" "test" { + bucket = aws_s3_directory_bucket.test.bucket + + eventbridge = true +} +`) +} diff --git a/internal/service/s3/bucket_object_lock_configuration.go b/internal/service/s3/bucket_object_lock_configuration.go index b224809ceb2..cda36a61368 100644 --- a/internal/service/s3/bucket_object_lock_configuration.go +++ b/internal/service/s3/bucket_object_lock_configuration.go @@ -6,6 +6,7 @@ package s3 import ( "context" "log" + "net/http" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -125,6 +126,10 @@ func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d *schema. 
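The PR detects directory buckets through two different failure modes, both funneled into the same `errDirectoryBucket` wrapper: most `PutBucket*Configuration` calls fail with a 400 `InvalidArgument` whose message ends in `expected CreateBucketConfiguration`, while `PutBucketAcl` and `PutObjectLockConfiguration` (next hunk) return a bare HTTP 501. A combined sketch; `normalizeDirectoryBucketError` is illustrative and not part of the PR, though both `tfawserr` helpers are used exactly as shown in the hunks:

```go
// Illustrative only: the PR inlines these two checks per resource rather
// than sharing a helper like this one.
package s3

import (
	"net/http"

	"github.com/hashicorp/aws-sdk-go-base/v2/tfawserr"
)

func normalizeDirectoryBucketError(err error) error {
	// Most Put*Configuration APIs reject directory buckets with a 400
	// InvalidArgument whose message names the payload that was expected.
	if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "expected CreateBucketConfiguration") {
		return errDirectoryBucket(err)
	}
	// PutBucketAcl and PutObjectLockConfiguration instead return a bare
	// 501 Not Implemented, with no usable error code or message.
	if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) {
		return errDirectoryBucket(err)
	}
	return err
}
```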
return conn.PutObjectLockConfiguration(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Object Lock Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_object_lock_configuration_test.go b/internal/service/s3/bucket_object_lock_configuration_test.go index 615868a3031..2fe5140a243 100644 --- a/internal/service/s3/bucket_object_lock_configuration_test.go +++ b/internal/service/s3/bucket_object_lock_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -211,6 +212,24 @@ func TestAccS3BucketObjectLockConfiguration_noRule(t *testing.T) { }) } +func TestAccS3BucketObjectLockConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketObjectLockConfigurationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketObjectLockConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -317,3 +336,26 @@ resource "aws_s3_bucket_object_lock_configuration" "test" { } `, bucketName) } + +func testAccBucketObjectLockConfigurationConfig_directoryBucket(bucketName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(bucketName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_object_lock_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + + rule { + default_retention { + mode = %[1]q + days = 3 + } + } +} +`, types.ObjectLockRetentionModeCompliance)) +} diff --git a/internal/service/s3/bucket_ownership_controls.go b/internal/service/s3/bucket_ownership_controls.go index 43b83a658bd..2c1d777d54b 100644 --- a/internal/service/s3/bucket_ownership_controls.go +++ b/internal/service/s3/bucket_ownership_controls.go @@ -74,6 +74,10 @@ func resourceBucketOwnershipControlsCreate(ctx context.Context, d *schema.Resour _, err := conn.PutBucketOwnershipControls(ctx, input) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "OwnershipControls is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Ownership Controls: %s", bucket, err) } diff --git a/internal/service/s3/bucket_ownership_controls_test.go b/internal/service/s3/bucket_ownership_controls_test.go index e56e978136e..51e022560ab 100644 --- a/internal/service/s3/bucket_ownership_controls_test.go +++ b/internal/service/s3/bucket_ownership_controls_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" 
"github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -133,6 +134,24 @@ func TestAccS3BucketOwnershipControls_Rule_objectOwnership(t *testing.T) { }) } +func TestAccS3BucketOwnershipControls_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketOwnershipControlsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketOwnershipControlsConfig_directoryBucket(rName, string(types.ObjectOwnershipBucketOwnerPreferred)), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketOwnershipControlsDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -189,3 +208,23 @@ resource "aws_s3_bucket_ownership_controls" "test" { } `, rName, objectOwnership) } + +func testAccBucketOwnershipControlsConfig_directoryBucket(rName, objectOwnership string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_directory_bucket.test.bucket + + rule { + object_ownership = %[1]q + } +} +`, objectOwnership)) +} diff --git a/internal/service/s3/bucket_policy_test.go b/internal/service/s3/bucket_policy_test.go index 3decf8c2113..d60f13b1a6e 100644 --- a/internal/service/s3/bucket_policy_test.go +++ b/internal/service/s3/bucket_policy_test.go @@ -430,6 +430,27 @@ func TestAccS3BucketPolicy_migrate_withChange(t *testing.T) { }) } +func TestAccS3BucketPolicy_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketPolicyConfig_directoryBucket(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "policy"), + ), + }, + }, + }) +} + func testAccCheckBucketPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -480,7 +501,7 @@ func testAccCheckBucketHasPolicy(ctx context.Context, n string, expectedPolicyTe } if !equivalent { return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", - expectedPolicyTemplate, policy) + expectedPolicyText, policy) } return nil @@ -911,3 +932,42 @@ resource "aws_s3_bucket_policy" "test" { } `, rName) } + +func testAccBucketPolicyConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +resource 
"aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_directory_bucket.test.bucket + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3express:*", + ] + + resources = [ + aws_s3_directory_bucket.test.arn, + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} +`) +} diff --git a/internal/service/s3/bucket_public_access_block.go b/internal/service/s3/bucket_public_access_block.go index 6e779162b24..2339cec6715 100644 --- a/internal/service/s3/bucket_public_access_block.go +++ b/internal/service/s3/bucket_public_access_block.go @@ -80,6 +80,10 @@ func resourceBucketPublicAccessBlockCreate(ctx context.Context, d *schema.Resour return conn.PutPublicAccessBlock(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "PublicAccessBlockConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Public Access Block: %s", bucket, err) } diff --git a/internal/service/s3/bucket_public_access_block_test.go b/internal/service/s3/bucket_public_access_block_test.go index ff411e026bf..5ca0991b148 100644 --- a/internal/service/s3/bucket_public_access_block_test.go +++ b/internal/service/s3/bucket_public_access_block_test.go @@ -8,7 +8,9 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -269,6 +271,24 @@ func TestAccS3BucketPublicAccessBlock_restrictPublicBuckets(t *testing.T) { }) } +func TestAccS3BucketPublicAccessBlock_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + name := fmt.Sprintf("tf-test-bucket-%d", sdkacctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketPublicAccessBlockConfig_directoryBucket(name, "false", "false", "false", "false"), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketPublicAccessBlockDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -332,3 +352,21 @@ resource "aws_s3_bucket_public_access_block" "test" { } `, bucketName, blockPublicAcls, blockPublicPolicy, ignorePublicAcls, restrictPublicBuckets) } + +func testAccBucketPublicAccessBlockConfig_directoryBucket(bucketName, blockPublicAcls, blockPublicPolicy, ignorePublicAcls, restrictPublicBuckets string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(bucketName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + location { + name = local.location_name + } +} +resource "aws_s3_bucket_public_access_block" "bucket" { + bucket = 
aws_s3_directory_bucket.test.bucket + block_public_acls = %[1]q + block_public_policy = %[2]q + ignore_public_acls = %[3]q + restrict_public_buckets = %[4]q +} +`, blockPublicAcls, blockPublicPolicy, ignorePublicAcls, restrictPublicBuckets)) +} diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index e6c0f8e4e9d..577eb1d9945 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -346,6 +346,10 @@ func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema _, err = conn.PutBucketReplication(ctx, input) } + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "ReplicationConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Replication Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index d68c33e7300..58fd6535291 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -8,7 +8,9 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -1177,6 +1179,29 @@ func TestAccS3BucketReplicationConfiguration_migrate_withChange(t *testing.T) { }) } +func TestAccS3BucketReplicationConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckMultipleRegion(t, 2) + }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfigurationConfig_directoryBucket(rName, s3.StorageClassStandard), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + // testAccCheckBucketReplicationConfigurationDestroy is the equivalent of the "WithProvider" // version, but for use with "same region" tests requiring only one provider. 
func testAccCheckBucketReplicationConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { @@ -2391,3 +2416,30 @@ resource "aws_s3_bucket_replication_configuration" "test" { } }`) } + +func testAccBucketReplicationConfigurationConfig_directoryBucket(rName, storageClass string) string { + return acctest.ConfigCompose(testAccBucketReplicationConfigurationConfig_base(rName), testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + location { + name = local.location_name + } +} +resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_directory_bucket.test.bucket + role = aws_iam_role.test.arn + rule { + id = "foobar" + prefix = "foo" + status = "Enabled" + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = %[1]q + } + } +}`, storageClass)) +} diff --git a/internal/service/s3/bucket_request_payment_configuration.go b/internal/service/s3/bucket_request_payment_configuration.go index c41120129b9..f9db85f1e13 100644 --- a/internal/service/s3/bucket_request_payment_configuration.go +++ b/internal/service/s3/bucket_request_payment_configuration.go @@ -74,6 +74,10 @@ func resourceBucketRequestPaymentConfigurationCreate(ctx context.Context, d *sch return conn.PutBucketRequestPayment(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "RequestPaymentConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Request Payment Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_request_payment_configuration_test.go b/internal/service/s3/bucket_request_payment_configuration_test.go index 5a337de15e2..b5748916189 100644 --- a/internal/service/s3/bucket_request_payment_configuration_test.go +++ b/internal/service/s3/bucket_request_payment_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -176,6 +177,24 @@ func TestAccS3BucketRequestPaymentConfiguration_migrate_withChange(t *testing.T) }) } +func TestAccS3BucketRequestPaymentConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketRequestPaymentConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketRequestPaymentConfigurationConfig_directoryBucket(rName, string(types.PayerBucketOwner)), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketRequestPaymentConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -239,3 +258,20 @@ resource "aws_s3_bucket_request_payment_configuration" "test" { } `, rName, payer) } + +func testAccBucketRequestPaymentConfigurationConfig_directoryBucket(rName, payer string) string { 
+ return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_request_payment_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + payer = %[1]q +} +`, payer)) +} diff --git a/internal/service/s3/bucket_server_side_encryption_configuration.go b/internal/service/s3/bucket_server_side_encryption_configuration.go index 3f14da15a6a..fe99e984580 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration.go @@ -99,6 +99,10 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, return conn.PutBucketEncryption(ctx, input) }, errCodeNoSuchBucket, errCodeOperationAborted) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "ServerSideEncryptionConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Server-side Encryption Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_server_side_encryption_configuration_test.go b/internal/service/s3/bucket_server_side_encryption_configuration_test.go index 2460aa58581..01fb94e672c 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration_test.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -411,6 +412,24 @@ func TestAccS3BucketServerSideEncryptionConfiguration_migrate_withChange(t *test }) } +func TestAccS3BucketServerSideEncryptionConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccBucketServerSideEncryptionConfigurationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketServerSideEncryptionConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -579,3 +598,26 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "test" { } `, rName) } + +func testAccBucketServerSideEncryptionConfigurationConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + + rule { + # This is Amazon S3 bucket default encryption. 
+ apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} +`) +} diff --git a/internal/service/s3/bucket_versioning.go b/internal/service/s3/bucket_versioning.go index a3830914332..bb289823521 100644 --- a/internal/service/s3/bucket_versioning.go +++ b/internal/service/s3/bucket_versioning.go @@ -127,6 +127,10 @@ func resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, return conn.PutBucketVersioning(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "VersioningConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Versioning: %s", bucket, err) } diff --git a/internal/service/s3/bucket_versioning_test.go b/internal/service/s3/bucket_versioning_test.go index 27c69c02a13..f0314c360c1 100644 --- a/internal/service/s3/bucket_versioning_test.go +++ b/internal/service/s3/bucket_versioning_test.go @@ -483,6 +483,24 @@ func TestAccS3BucketVersioning_Status_suspendedToDisabled(t *testing.T) { }) } +func TestAccS3BucketVersioning_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketVersioningDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketVersioningConfig_directoryBucket(rName, string(types.BucketVersioningStatusEnabled)), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketVersioningDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -595,3 +613,22 @@ resource "aws_s3_bucket_versioning" "test" { } `, rName, mfaDelete) } + +func testAccBucketVersioningConfig_directoryBucket(rName, status string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_directory_bucket.test.bucket + versioning_configuration { + status = %[1]q + } +} +`, status)) +} diff --git a/internal/service/s3/bucket_website_configuration.go b/internal/service/s3/bucket_website_configuration.go index 48e80a6b57c..ef9c5445c13 100644 --- a/internal/service/s3/bucket_website_configuration.go +++ b/internal/service/s3/bucket_website_configuration.go @@ -221,6 +221,10 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res return conn.PutBucketWebsite(ctx, input) }, errCodeNoSuchBucket) + if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "WebsiteConfiguration is not valid, expected CreateBucketConfiguration") { + err = errDirectoryBucket(err) + } + if err != nil { return diag.Errorf("creating S3 Bucket (%s) Website Configuration: %s", bucket, err) } diff --git a/internal/service/s3/bucket_website_configuration_test.go b/internal/service/s3/bucket_website_configuration_test.go index 103d2549382..87ff864be41 100644 --- a/internal/service/s3/bucket_website_configuration_test.go +++ b/internal/service/s3/bucket_website_configuration_test.go @@ -8,6 +8,7 @@ import ( 
"fmt" "testing" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -537,6 +538,24 @@ func TestAccS3BucketWebsiteConfiguration_migrate_websiteWithRoutingRuleWithChang }) } +func TestAccS3BucketWebsiteConfiguration_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketWebsiteConfigurationConfig_directoryBucket(rName), + ExpectError: regexache.MustCompile(`directory buckets are not supported`), + }, + }, + }) +} + func testAccCheckBucketWebsiteConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -912,3 +931,22 @@ resource "aws_s3_bucket_website_configuration" "test" { } `, rName) } + +func testAccBucketWebsiteConfigurationConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_bucket_website_configuration" "test" { + bucket = aws_s3_directory_bucket.test.bucket + index_document { + suffix = "index.html" + } +} +`) +} diff --git a/internal/service/s3/directory_bucket.go b/internal/service/s3/directory_bucket.go new file mode 100644 index 00000000000..ecdc077bfc0 --- /dev/null +++ b/internal/service/s3/directory_bucket.go @@ -0,0 +1,309 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "fmt" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/s3" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +var ( + // e.g. 
example--usw2-az2--x-s3 + directoryBucketNameRegex = regexache.MustCompile(`^([0-9a-z.-]+)--([a-z]+\d+-az\d+)--x-s3$`) +) + +// @FrameworkResource(name="Directory Bucket") +func newDirectoryBucketResource(context.Context) (resource.ResourceWithConfigure, error) { + r := &directoryBucketResource{} + + return r, nil +} + +type directoryBucketResource struct { + framework.ResourceWithConfigure + framework.WithImportByID +} + +func (r *directoryBucketResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_s3_directory_bucket" +} + +func (r *directoryBucketResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + dataRedundancyType := fwtypes.StringEnumType[awstypes.DataRedundancy]() + bucketTypeType := fwtypes.StringEnumType[awstypes.BucketType]() + locationTypeType := fwtypes.StringEnumType[awstypes.LocationType]() + + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + "bucket": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches(directoryBucketNameRegex, `must be in the format [bucket_name]--[azid]--x-s3. Use the aws_s3_bucket resource to manage general purpose buckets`), + }, + }, + "data_redundancy": schema.StringAttribute{ + CustomType: dataRedundancyType, + Optional: true, + Computed: true, + Default: dataRedundancyType.AttributeDefault(awstypes.DataRedundancySingleAvailabilityZone), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "force_destroy": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + names.AttrID: framework.IDAttribute(), + "type": schema.StringAttribute{ + CustomType: bucketTypeType, + Optional: true, + Computed: true, + Default: bucketTypeType.AttributeDefault(awstypes.BucketTypeDirectory), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "location": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[locationInfoModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "type": schema.StringAttribute{ + CustomType: locationTypeType, + Optional: true, + Computed: true, + Default: locationTypeType.AttributeDefault(awstypes.LocationTypeAvailabilityZone), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + }, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.IsRequired(), + }, + }, + }, + } +} + +func (r *directoryBucketResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data directoryBucketResourceModel + + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + + if response.Diagnostics.HasError() { + return + } + + locationInfoData, diags := data.Location.ToPtr(ctx) + + response.Diagnostics.Append(diags...) 
+ + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + + input := &s3.CreateBucketInput{ + Bucket: flex.StringFromFramework(ctx, data.Bucket), + CreateBucketConfiguration: &awstypes.CreateBucketConfiguration{ + Bucket: &awstypes.BucketInfo{ + DataRedundancy: data.DataRedundancy.ValueEnum(), + Type: awstypes.BucketType(data.Type.ValueString()), + }, + Location: &awstypes.LocationInfo{ + Name: flex.StringFromFramework(ctx, locationInfoData.Name), + Type: locationInfoData.Type.ValueEnum(), + }, + }, + } + + _, err := conn.CreateBucket(ctx, input, useRegionalEndpointInUSEast1) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Directory Bucket (%s)", data.Bucket.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + data.ARN = types.StringValue(r.arn(data.Bucket.ValueString())) + data.setID() + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *directoryBucketResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data directoryBucketResourceModel + + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().S3Client(ctx) + + err := findBucket(ctx, conn, data.Bucket.ValueString(), useRegionalEndpointInUSEast1) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Directory Bucket (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + data.ARN = types.StringValue(r.arn(data.Bucket.ValueString())) + + // No API to return bucket type, location etc. + data.DataRedundancy = fwtypes.StringEnumValue(awstypes.DataRedundancySingleAvailabilityZone) + if matches := directoryBucketNameRegex.FindStringSubmatch(data.ID.ValueString()); len(matches) == 3 { + data.Location = fwtypes.NewListNestedObjectValueOfPtr(ctx, &locationInfoModel{ + Name: flex.StringValueToFramework(ctx, matches[2]), + Type: fwtypes.StringEnumValue(awstypes.LocationTypeAvailabilityZone), + }) + } + data.Type = fwtypes.StringEnumValue(awstypes.BucketTypeDirectory) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *directoryBucketResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new directoryBucketResourceModel + + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *directoryBucketResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data directoryBucketResourceModel + + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + + _, err := conn.DeleteBucket(ctx, &s3.DeleteBucketInput{ + Bucket: flex.StringFromFramework(ctx, data.ID), + }, useRegionalEndpointInUSEast1) + + if tfawserr.ErrCodeEquals(err, errCodeBucketNotEmpty) { + if data.ForceDestroy.ValueBool() { + // Empty the bucket and try again. + _, err = emptyBucket(ctx, conn, data.ID.ValueString(), false) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("emptying S3 Directory Bucket (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Retry with the same regional endpoint override as the first attempt. + _, err = conn.DeleteBucket(ctx, &s3.DeleteBucketInput{ + Bucket: flex.StringFromFramework(ctx, data.ID), + }, useRegionalEndpointInUSEast1) + } + } + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Directory Bucket (%s)", data.ID.ValueString()), err.Error()) + + return + } +} + +// arn returns the ARN of the specified bucket. +func (r *directoryBucketResource) arn(bucket string) string { + return r.RegionalARN("s3express", fmt.Sprintf("bucket/%s", bucket)) +} + +type directoryBucketResourceModel struct { + ARN types.String `tfsdk:"arn"` + Bucket types.String `tfsdk:"bucket"` + DataRedundancy fwtypes.StringEnum[awstypes.DataRedundancy] `tfsdk:"data_redundancy"` + ForceDestroy types.Bool `tfsdk:"force_destroy"` + Location fwtypes.ListNestedObjectValueOf[locationInfoModel] `tfsdk:"location"` + ID types.String `tfsdk:"id"` + Type fwtypes.StringEnum[awstypes.BucketType] `tfsdk:"type"` +} + +func (data *directoryBucketResourceModel) InitFromID() error { + data.Bucket = data.ID + return nil +} + +func (data *directoryBucketResourceModel) setID() { + data.ID = data.Bucket +} + +type locationInfoModel struct { + Name types.String `tfsdk:"name"` + Type fwtypes.StringEnum[awstypes.LocationType] `tfsdk:"type"` +} diff --git a/internal/service/s3/directory_bucket_test.go b/internal/service/s3/directory_bucket_test.go new file mode 100644 index 00000000000..598f9853087 --- /dev/null +++ b/internal/service/s3/directory_bucket_test.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package s3_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3DirectoryBucket_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_directory_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDirectoryBucketConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectoryBucketExists(ctx, resourceName), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "s3express", regexache.MustCompile(fmt.Sprintf(`bucket/%s--.*-x-s3`, rName))), + resource.TestCheckResourceAttr(resourceName, "data_redundancy", "SingleAvailabilityZone"), + resource.TestCheckResourceAttr(resourceName, "location.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "location.0.name"), + resource.TestCheckResourceAttr(resourceName, "location.0.type", "AvailabilityZone"), + resource.TestCheckResourceAttr(resourceName, "type", "Directory"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccS3DirectoryBucket_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_directory_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDirectoryBucketConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDirectoryBucketExists(ctx, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3.ResourceDirectoryBucket, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckDirectoryBucketDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_directory_bucket" { + continue + } + + err := tfs3.FindBucket(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("S3 Bucket %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckDirectoryBucketExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if 
!ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + return tfs3.FindBucket(ctx, conn, rs.Primary.ID) + } +} + +func testAccConfigAvailableAZsDirectoryBucket() string { + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints. + return acctest.ConfigAvailableAZsNoOptInExclude("use1-az1", "use1-az2", "use1-az3", "usw2-az2", "apne1-az2") +} + +func testAccDirectoryBucketConfig_base(rName string) string { + return acctest.ConfigCompose(testAccConfigAvailableAZsDirectoryBucket(), fmt.Sprintf(` +locals { + location_name = data.aws_availability_zones.available.zone_ids[0] + bucket = "%[1]s--${local.location_name}--x-s3" +} +`, rName)) +} + +func testAccDirectoryBucketConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} +`) +} diff --git a/internal/service/s3/directory_buckets_data_source.go b/internal/service/s3/directory_buckets_data_source.go new file mode 100644 index 00000000000..67d408d919d --- /dev/null +++ b/internal/service/s3/directory_buckets_data_source.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource +func newDirectoryBucketsDataSource(context.Context) (datasource.DataSourceWithConfigure, error) { + d := &directoryBucketsDataSource{} + + return d, nil +} + +type directoryBucketsDataSource struct { + framework.DataSourceWithConfigure +} + +func (d *directoryBucketsDataSource) Metadata(_ context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = "aws_s3_directory_buckets" +} + +func (d *directoryBucketsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "arns": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "buckets": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + names.AttrID: framework.IDAttribute(), + }, + } +} + +func (d *directoryBucketsDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var data directoryBucketsDataSourceModel + + response.Diagnostics.Append(request.Config.Get(ctx, &data)...) 
+ + if response.Diagnostics.HasError() { + return + } + + conn := d.Meta().S3Client(ctx) + + input := &s3.ListDirectoryBucketsInput{} + var buckets []string + pages := s3.NewListDirectoryBucketsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + response.Diagnostics.AddError("listing S3 Directory Buckets", err.Error()) + + return + } + + for _, v := range page.Buckets { + buckets = append(buckets, aws.ToString(v.Name)) + } + } + + data.ARNs = flex.FlattenFrameworkStringValueList(ctx, tfslices.ApplyToAll(buckets, func(v string) string { + return d.RegionalARN("s3express", fmt.Sprintf("bucket/%s", v)) + })) + data.Buckets = flex.FlattenFrameworkStringValueList(ctx, buckets) + data.ID = types.StringValue(d.Meta().Region) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +type directoryBucketsDataSourceModel struct { + ARNs types.List `tfsdk:"arns"` + Buckets types.List `tfsdk:"buckets"` + ID types.String `tfsdk:"id"` +} diff --git a/internal/service/s3/directory_buckets_data_source_test.go b/internal/service/s3/directory_buckets_data_source_test.go new file mode 100644 index 00000000000..ac542c945e7 --- /dev/null +++ b/internal/service/s3/directory_buckets_data_source_test.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3_test + +import ( + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3DirectoryBucketsDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_s3_directory_buckets.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: testAccDirectoryBucketsDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckResourceAttrGreaterThanOrEqualValue(dataSourceName, "arns.#", 1), + acctest.CheckResourceAttrGreaterThanOrEqualValue(dataSourceName, "buckets.#", 1), + ), + }, + }, + }) +} + +func testAccDirectoryBucketsDataSourceConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +data "aws_s3_directory_buckets" "test" { + depends_on = [aws_s3_directory_bucket.test] +} +`) +} diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index 7014f940976..a65049ca479 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -3,12 +3,17 @@ package s3 +import ( + "fmt" +) + // Error code constants missing from AWS Go SDK: // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#pkg-constants const ( errCodeAccessDenied = "AccessDenied" errCodeBucketNotEmpty = "BucketNotEmpty" + errCodeInvalidArgument = "InvalidArgument" errCodeInvalidBucketState = "InvalidBucketState" errCodeInvalidRequest = "InvalidRequest" errCodeMalformedPolicy = "MalformedPolicy" @@ -42,3 +47,7 @@ const ( const ( ErrMessageBucketAlreadyExists = "bucket already exists" ) 
+ +func errDirectoryBucket(err error) error { + return fmt.Errorf("directory buckets are not supported: %w", err) +} diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index d14ea077d4f..a8073d65d0d 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -5,6 +5,8 @@ package s3 // Exports for use in tests only. var ( + ResourceDirectoryBucket = newDirectoryBucketResource + DeleteAllObjectVersions = deleteAllObjectVersions EmptyBucket = emptyBucket FindAnalyticsConfiguration = findAnalyticsConfiguration diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index d0caecaab9c..b1a88a4f998 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -297,14 +297,12 @@ func resourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interf return sdkdiag.AppendFromErr(diags, err) } - tags, err := ObjectListTags(ctx, conn, bucket, key) - - if err != nil { + if tags, err := ObjectListTags(ctx, conn, bucket, key); err == nil { + setTagsOut(ctx, Tags(tags)) + } else if !tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { // Directory buckets return HTTP status code 501, NotImplemented. return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) } - setTagsOut(ctx, Tags(tags)) - return diags } diff --git a/internal/service/s3/object_copy.go b/internal/service/s3/object_copy.go index 6a12a960ff5..4e5e2f99b0b 100644 --- a/internal/service/s3/object_copy.go +++ b/internal/service/s3/object_copy.go @@ -8,12 +8,14 @@ import ( "context" "fmt" "log" + "net/http" "net/url" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -380,14 +382,12 @@ func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendFromErr(diags, err) } - tags, err := ObjectListTags(ctx, conn, bucket, key) - - if err != nil { + if tags, err := ObjectListTags(ctx, conn, bucket, key); err == nil { + setTagsOut(ctx, Tags(tags)) + } else if !tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { // Directory buckets return HTTP status code 501, NotImplemented. 
return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) } - setTagsOut(ctx, Tags(tags)) - return diags } diff --git a/internal/service/s3/object_copy_test.go b/internal/service/s3/object_copy_test.go index def0d35abb6..c5f6a225fe2 100644 --- a/internal/service/s3/object_copy_test.go +++ b/internal/service/s3/object_copy_test.go @@ -435,6 +435,80 @@ func TestAccS3ObjectCopy_targetWithMultipleSlashesMigrated(t *testing.T) { }) } +func TestAccS3ObjectCopy_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object_copy.test" + sourceKey := "source" + targetKey := "target" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + // FIXME "Error running post-test destroy, there may be dangling resources: operation error S3: HeadObject, https response error StatusCode: 403, RequestID: 0033eada6b00018c1826f0b80509eee5684ca4b6, HostID: T7lA2Yxglq, api error Forbidden: Forbidden" + // CheckDestroy: testAccCheckObjectCopyDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccObjectCopyConfig_directoryBucket(rName1, sourceKey, rName2, targetKey), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectCopyExists(ctx, resourceName), + resource.TestCheckNoResourceAttr(resourceName, "acl"), + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "bucket_key_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "cache_control", ""), + resource.TestCheckNoResourceAttr(resourceName, "checksum_algorithm"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), + resource.TestCheckResourceAttr(resourceName, "content_disposition", ""), + resource.TestCheckResourceAttr(resourceName, "content_encoding", ""), + resource.TestCheckResourceAttr(resourceName, "content_language", ""), + resource.TestCheckResourceAttr(resourceName, "content_type", "application/octet-stream"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_match"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_modified_since"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_none_match"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_unmodified_since"), + resource.TestCheckResourceAttr(resourceName, "customer_algorithm", ""), + resource.TestCheckNoResourceAttr(resourceName, "customer_key"), + resource.TestCheckResourceAttr(resourceName, "customer_key_md5", ""), + resource.TestCheckNoResourceAttr(resourceName, "expected_bucket_owner"), + resource.TestCheckNoResourceAttr(resourceName, "expected_source_bucket_owner"), + resource.TestCheckResourceAttr(resourceName, "expiration", ""), + resource.TestCheckNoResourceAttr(resourceName, "expires"), + resource.TestCheckResourceAttr(resourceName, "force_destroy", "false"), + resource.TestCheckResourceAttr(resourceName, "grant.#", "0"), + resource.TestCheckResourceAttr(resourceName, "key", targetKey), + resource.TestCheckResourceAttr(resourceName, 
"kms_encryption_context", ""), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttrSet(resourceName, "last_modified"), + resource.TestCheckResourceAttr(resourceName, "metadata.%", "0"), + resource.TestCheckNoResourceAttr(resourceName, "metadata_directive"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + resource.TestCheckResourceAttr(resourceName, "request_charged", "false"), + resource.TestCheckNoResourceAttr(resourceName, "request_payer"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption", "AES256"), + resource.TestCheckResourceAttrSet(resourceName, "source"), + resource.TestCheckNoResourceAttr(resourceName, "source_customer_algorithm"), + resource.TestCheckNoResourceAttr(resourceName, "source_customer_key"), + resource.TestCheckNoResourceAttr(resourceName, "source_customer_key_md5"), + resource.TestCheckResourceAttr(resourceName, "source_version_id", ""), + resource.TestCheckResourceAttr(resourceName, "storage_class", "EXPRESS_ONEZONE"), + resource.TestCheckNoResourceAttr(resourceName, "tagging_directive"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "version_id", ""), + resource.TestCheckResourceAttr(resourceName, "website_redirect", ""), + ), + }, + }, + }) +} + func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -704,3 +778,41 @@ resource "aws_s3_object_copy" "test" { } `, sourceBucket, sourceKey, targetBucket, targetKey, legalHoldStatus) } + +func testAccObjectCopyConfig_directoryBucket(sourceBucket, sourceKey, targetBucket, targetKey string) string { + return acctest.ConfigCompose(testAccConfigAvailableAZsDirectoryBucket(), fmt.Sprintf(` +locals { + location_name = data.aws_availability_zones.available.zone_ids[0] + source_bucket = "%[1]s--${local.location_name}--x-s3" + target_bucket = "%[3]s--${local.location_name}--x-s3" +} + +resource "aws_s3_directory_bucket" "source" { + bucket = local.source_bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_directory_bucket" "test" { + bucket = local.target_bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_object" "source" { + bucket = aws_s3_directory_bucket.source.bucket + key = %[2]q + content = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" +} + +resource "aws_s3_object_copy" "test" { + bucket = aws_s3_directory_bucket.test.bucket + key = %[4]q + source = "${aws_s3_object.source.bucket}/${aws_s3_object.source.key}" +} +`, sourceBucket, sourceKey, targetBucket, targetKey)) +} diff --git a/internal/service/s3/object_data_source.go b/internal/service/s3/object_data_source.go index 1fb0f806a08..7d1830a703d 100644 --- a/internal/service/s3/object_data_source.go +++ b/internal/service/s3/object_data_source.go @@ -5,6 +5,7 @@ package s3 import ( "context" + "net/http" "regexp" "strings" "time" @@ -14,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -249,16 +251,14 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set("body", string(buf.Bytes())) } - tags, err := ObjectListTags(ctx, conn, bucket, key) - - if err != nil { + if tags, err := ObjectListTags(ctx, conn, bucket, key); err == nil { + if err := d.Set("tags", tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + } + } else if !tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { // Directory buckets return HTTP status code 501, NotImplemented. return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) } - if err := d.Set("tags", tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } - return diags } diff --git a/internal/service/s3/object_data_source_test.go b/internal/service/s3/object_data_source_test.go index e690926fa45..fe1acf7e068 100644 --- a/internal/service/s3/object_data_source_test.go +++ b/internal/service/s3/object_data_source_test.go @@ -462,6 +462,42 @@ func TestAccS3ObjectDataSource_metadataUppercaseKey(t *testing.T) { }) } +func TestAccS3ObjectDataSource_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: testAccObjectDataSourceConfig_directoryBucket(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckNoResourceAttr(dataSourceName, "body"), + resource.TestCheckNoResourceAttr(dataSourceName, "checksum_mode"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), + resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), + resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), + resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), + resource.TestMatchResourceAttr(dataSourceName, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), + resource.TestCheckResourceAttr(dataSourceName, "metadata.%", "0"), + resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), + resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), + resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), + resource.TestCheckResourceAttr(dataSourceName, "tags.%", "0"), + ), + }, + }, + }) +} + func testAccObjectDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { @@ -842,3 +878,26 @@ data "aws_s3_object" "test" { } `, rName, key) } + +func testAccObjectDataSourceConfig_directoryBucket(rName string) 
string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_directory_bucket.test.bucket + key = "%[1]s-key" + content = "Hello World" +} + +data "aws_s3_object" "test" { + bucket = aws_s3_object.test.bucket + key = aws_s3_object.test.key +} +`, rName)) +} diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index 806b7dd076b..d7f20759146 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -1651,6 +1651,104 @@ func TestAccS3Object_keyWithSlashesMigrated(t *testing.T) { }) } +func TestAccS3Object_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + var obj s3.GetObjectOutput + resourceName := "aws_s3_object.object" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + // FIXME "Error running post-test destroy, there may be dangling resources: operation error S3: HeadObject, https response error StatusCode: 403, RequestID: 0033eada6b00018c1804fda905093646dd76f12a, HostID: SfKUL8OB, api error Forbidden: Forbidden" + // CheckDestroy: testAccCheckObjectDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccObjectConfig_directoryBucket(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj), + testAccCheckObjectBody(&obj, ""), + resource.TestCheckNoResourceAttr(resourceName, "acl"), + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "bucket_key_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "cache_control", ""), + resource.TestCheckNoResourceAttr(resourceName, "checksum_algorithm"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), + resource.TestCheckNoResourceAttr(resourceName, "content"), + resource.TestCheckNoResourceAttr(resourceName, "content_base64"), + resource.TestCheckResourceAttr(resourceName, "content_disposition", ""), + resource.TestCheckResourceAttr(resourceName, "content_encoding", ""), + resource.TestCheckResourceAttr(resourceName, "content_language", ""), + resource.TestCheckResourceAttr(resourceName, "content_type", "application/octet-stream"), + resource.TestCheckResourceAttrSet(resourceName, "etag"), + resource.TestCheckResourceAttr(resourceName, "force_destroy", "false"), + resource.TestCheckResourceAttr(resourceName, "key", "test-key"), + resource.TestCheckNoResourceAttr(resourceName, "kms_key_id"), + resource.TestCheckResourceAttr(resourceName, "metadata.%", "0"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + resource.TestCheckResourceAttr(resourceName, "override_provider.#", "1"), +
resource.TestCheckResourceAttr(resourceName, "override_provider.0.default_tags.#", "1"), + resource.TestCheckResourceAttr(resourceName, "override_provider.0.default_tags.0.tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption", "AES256"), + resource.TestCheckNoResourceAttr(resourceName, "source"), + resource.TestCheckNoResourceAttr(resourceName, "source_hash"), + resource.TestCheckResourceAttr(resourceName, "storage_class", "EXPRESS_ONEZONE"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "version_id", ""), + resource.TestCheckResourceAttr(resourceName, "website_redirect", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "override_provider"}, + ImportStateIdFunc: func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not Found: %s", resourceName) + } + + return fmt.Sprintf("s3://%s/test-key", rs.Primary.Attributes["bucket"]), nil + }, + }, + }, + }) +} + +func TestAccS3Object_DirectoryBucket_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + var obj s3.GetObjectOutput + resourceName := "aws_s3_object.object" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: acctest.ConfigCompose( + acctest.ConfigDefaultTags_Tags1("providerkey1", "providervalue1"), + testAccObjectConfig_directoryBucket(rName), + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj), + ), + }, + }, + }) +} + func testAccCheckObjectVersionIDDiffers(first, second *s3.GetObjectOutput) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.ToString(first.VersionId) == aws.ToString(second.VersionId) { @@ -2524,3 +2622,26 @@ resource "aws_s3_object" "object" { } `, rName) } + +func testAccObjectConfig_directoryBucket(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_object" "object" { + bucket = aws_s3_directory_bucket.test.bucket + key = "test-key" + + override_provider { + default_tags { + tags = {} + } + } +} +`) +} diff --git a/internal/service/s3/objects_data_source_test.go b/internal/service/s3/objects_data_source_test.go index 52ee0abfafc..92896fc7cc9 100644 --- a/internal/service/s3/objects_data_source_test.go +++ b/internal/service/s3/objects_data_source_test.go @@ -220,6 +220,30 @@ func TestAccS3ObjectsDataSource_fetchOwner(t *testing.T) { }) } +func TestAccS3ObjectsDataSource_directoryBucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_s3_objects.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: 
testAccObjectsDataSourceConfig_directoryBucket(rName, 1), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "common_prefixes.#", "0"), + resource.TestCheckResourceAttr(dataSourceName, "keys.#", "3"), + resource.TestCheckResourceAttr(dataSourceName, "owners.#", "0"), + resource.TestCheckResourceAttr(dataSourceName, "request_charged", ""), + ), + }, + }, + }) +} + func testAccObjectsDataSourceConfig_base(rName string, n int) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { @@ -357,3 +381,45 @@ data "aws_s3_objects" "test" { } `) } + +func testAccObjectsDataSourceConfig_directoryBucket(rName string, n int) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } +} + +resource "aws_s3_object" "test1" { + count = %[1]d + + bucket = aws_s3_directory_bucket.test.bucket + key = "prefix1/sub1/${count.index}" + content = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +} + +resource "aws_s3_object" "test2" { + count = %[1]d + + bucket = aws_s3_directory_bucket.test.bucket + key = "prefix1/sub2/${count.index}" + content = "0123456789" +} + +resource "aws_s3_object" "test3" { + count = %[1]d + + bucket = aws_s3_directory_bucket.test.bucket + key = "prefix2/${count.index}" + content = "abcdefghijklmnopqrstuvwxyz" +} + +data "aws_s3_objects" "test" { + bucket = aws_s3_directory_bucket.test.bucket + + depends_on = [aws_s3_object.test1, aws_s3_object.test2, aws_s3_object.test3] +} +`, n)) +} diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index 04b32070e85..690936383f2 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -17,6 +17,7 @@ import ( tfawserr_sdkv1 "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" ) // NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. @@ -52,10 +53,10 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( return s3_sdkv2.NewFromConfig(cfg, func(o *s3_sdkv2.Options) { if endpoint := config["endpoint"].(string); endpoint != "" { o.BaseEndpoint = aws_sdkv2.String(endpoint) - } else if o.Region == endpoints_sdkv1.UsEast1RegionID && config["s3_us_east_1_regional_endpoint"].(endpoints_sdkv1.S3UsEast1RegionalEndpoint) != endpoints_sdkv1.RegionalS3UsEast1Endpoint { + } else if o.Region == names.USEast1RegionID && config["s3_us_east_1_regional_endpoint"].(endpoints_sdkv1.S3UsEast1RegionalEndpoint) != endpoints_sdkv1.RegionalS3UsEast1Endpoint { // Maintain the AWS SDK for Go v1 default of using the global endpoint in us-east-1. // See https://github.com/hashicorp/terraform-provider-aws/issues/33028. - o.Region = "aws-global" + o.Region = names.GlobalRegionID } o.UsePathStyle = config["s3_use_path_style"].(bool) @@ -67,3 +68,10 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( })) }), nil } + +// Functional options to force the regional endpoint in us-east-1 if the client is configured to use the global endpoint. 
+func useRegionalEndpointInUSEast1(o *s3_sdkv2.Options) { + if o.Region == names.GlobalRegionID { + o.Region = names.USEast1RegionID + } +} diff --git a/internal/service/s3/service_package_gen.go b/internal/service/s3/service_package_gen.go index f0ff0323eec..519077c7552 100644 --- a/internal/service/s3/service_package_gen.go +++ b/internal/service/s3/service_package_gen.go @@ -13,11 +13,20 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDirectoryBucketsDataSource, + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { - return []*types.ServicePackageFrameworkResource{} + return []*types.ServicePackageFrameworkResource{ + { + Factory: newDirectoryBucketResource, + Name: "Directory Bucket", + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go index 49b52c2d8ce..a86164dfc42 100644 --- a/internal/service/s3/sweep.go +++ b/internal/service/s3/sweep.go @@ -20,6 +20,7 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -39,6 +40,14 @@ func RegisterSweepers() { "aws_s3control_multi_region_access_point", }, }) + + resource.AddTestSweepers("aws_s3_directory_bucket", &resource.Sweeper{ + Name: "aws_s3_directory_bucket", + F: sweepDirectoryBuckets, + Dependencies: []string{ + "aws_s3_object", + }, + }) } func sweepObjects(region string) error { @@ -48,9 +57,9 @@ func sweepObjects(region string) error { return fmt.Errorf("getting client: %s", err) } conn := client.S3Client(ctx) - input := &s3.ListBucketsInput{} - output, err := conn.ListBuckets(ctx, input) + // General-purpose buckets. + output, err := conn.ListBuckets(ctx, &s3.ListBucketsInput{}) if awsv2.SkipSweepError(err) { log.Printf("[WARN] Skipping S3 Objects sweep for %s: %s", region, err) @@ -58,12 +67,7 @@ func sweepObjects(region string) error { } if err != nil { - return fmt.Errorf("listing S3 Buckets: %w", err) - } - - if len(output.Buckets) == 0 { - log.Print("[DEBUG] No S3 Objects to sweep") - return nil + return fmt.Errorf("error listing S3 Buckets: %w", err) } buckets := tfslices.Filter(output.Buckets, bucketRegionFilter(ctx, conn, region, client.S3UsePathStyle())) @@ -91,6 +95,32 @@ func sweepObjects(region string) error { }) } + // Directory buckets. 
+ pages := s3.NewListDirectoryBucketsPaginator(conn, &s3.ListDirectoryBucketsInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping S3 Objects sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing S3 Directory Buckets (%s): %w", region, err) + } + + for _, v := range page.Buckets { + if !bucketNameFilter(v) { + continue + } + + sweepables = append(sweepables, objectSweeper{ + conn: conn, + bucket: aws.ToString(v.Name), + }) + } + } + err = sweep.SweepOrchestrator(ctx, sweepables) if err != nil { @@ -226,3 +256,46 @@ func bucketRegionFilter(ctx context.Context, conn *s3.Client, region string, s3U return true } } + +func sweepDirectoryBuckets(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) + if err != nil { + return fmt.Errorf("getting client: %s", err) + } + conn := client.S3Client(ctx) + input := &s3.ListDirectoryBucketsInput{} + sweepResources := make([]sweep.Sweepable, 0) + + pages := s3.NewListDirectoryBucketsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping S3 Directory Bucket sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing S3 Directory Buckets (%s): %w", region, err) + } + + for _, v := range page.Buckets { + if !bucketNameFilter(v) { + continue + } + + sweepResources = append(sweepResources, framework.NewSweepResource(newDirectoryBucketResource, client, + framework.NewAttribute("id", aws.ToString(v.Name)), + )) + } + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping S3 Directory Buckets (%s): %w", region, err) + } + + return nil +} diff --git a/names/names.go b/names/names.go index ecb39d2343a..11e9c15e572 100644 --- a/names/names.go +++ b/names/names.go @@ -98,6 +98,8 @@ const ( ) const ( + GlobalRegionID = "aws-global" // AWS Standard global region. + USEast1RegionID = "us-east-1" // US East (N. Virginia). USWest1RegionID = "us-west-1" // US West (N. California). USWest2RegionID = "us-west-2" // US West (Oregon). 
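The `useRegionalEndpointInUSEast1` helper above leans on a core aws-sdk-go-v2 behavior: every service operation accepts trailing functional options that are applied to a per-call copy of the client `Options`, so a single client pinned to the `aws-global` pseudo-region can still issue individual S3 Express calls against the us-east-1 regional endpoint. A minimal sketch of that pattern, assuming default credential configuration (the literal region strings stand in for the `GlobalRegionID`/`USEast1RegionID` constants added above):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// forceUSEast1Regional mirrors useRegionalEndpointInUSEast1: if the client
// was configured with the global pseudo-region, rewrite the region for this
// one call only. The client's own Options are left untouched.
func forceUSEast1Regional(o *s3.Options) {
	if o.Region == "aws-global" {
		o.Region = "us-east-1"
	}
}

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg)

	// The option is passed per operation, the same way the diff passes
	// useRegionalEndpointInUSEast1 to CreateBucket, HeadBucket and
	// DeleteBucket for directory buckets.
	out, err := client.ListDirectoryBuckets(ctx, &s3.ListDirectoryBucketsInput{}, forceUSEast1Regional)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("found %d directory buckets", len(out.Buckets))
}
```

Because the override runs per operation, general purpose bucket calls keep the provider's existing us-east-1 global-endpoint behavior while directory bucket calls always resolve regionally.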
diff --git a/names/names_data.csv b/names/names_data.csv
index 86b56755e43..c7a119a288c 100644
--- a/names/names_data.csv
+++ b/names/names_data.csv
@@ -306,7 +306,7 @@ route53-recovery-cluster,route53recoverycluster,route53recoverycluster,route53re
 route53-recovery-control-config,route53recoverycontrolconfig,route53recoverycontrolconfig,route53recoverycontrolconfig,,route53recoverycontrolconfig,,,Route53RecoveryControlConfig,Route53RecoveryControlConfig,x,1,,,aws_route53recoverycontrolconfig_,,route53recoverycontrolconfig_,Route 53 Recovery Control Config,Amazon,,,,,,,
 route53-recovery-readiness,route53recoveryreadiness,route53recoveryreadiness,route53recoveryreadiness,,route53recoveryreadiness,,,Route53RecoveryReadiness,Route53RecoveryReadiness,x,1,,,aws_route53recoveryreadiness_,,route53recoveryreadiness_,Route 53 Recovery Readiness,Amazon,,,,,,,
 route53resolver,route53resolver,route53resolver,route53resolver,,route53resolver,,,Route53Resolver,Route53Resolver,,1,,aws_route53_resolver_,aws_route53resolver_,,route53_resolver_,Route 53 Resolver,Amazon,,,,,,,
-s3api,s3api,s3,s3,,s3,,s3api,S3,S3,x,1,2,aws_(canonical_user_id|s3_bucket|s3_object),aws_s3_,,s3_bucket;s3_object;canonical_user_id,S3 (Simple Storage),Amazon,,,,,AWS_S3_ENDPOINT,TF_AWS_S3_ENDPOINT,
+s3api,s3api,s3,s3,,s3,,s3api,S3,S3,x,1,2,aws_(canonical_user_id|s3_bucket|s3_object|s3_directory_bucket),aws_s3_,,s3_bucket;s3_directory_bucket;s3_object;canonical_user_id,S3 (Simple Storage),Amazon,,,,,AWS_S3_ENDPOINT,TF_AWS_S3_ENDPOINT,
 s3control,s3control,s3control,s3control,,s3control,,,S3Control,S3Control,,,2,aws_(s3_account_|s3control_|s3_access_),aws_s3control_,,s3control;s3_account_;s3_access_,S3 Control,Amazon,,,,,,,
 glacier,glacier,glacier,glacier,,glacier,,,Glacier,Glacier,,,2,,aws_glacier_,,glacier_,S3 Glacier,Amazon,,,,,,,
 s3outposts,s3outposts,s3outposts,s3outposts,,s3outposts,,,S3Outposts,S3Outposts,,1,,,aws_s3outposts_,,s3outposts_,S3 on Outposts,Amazon,,,,,,,
diff --git a/website/docs/d/s3_directory_buckets.html.markdown b/website/docs/d/s3_directory_buckets.html.markdown
new file mode 100644
index 00000000000..24f623ec179
--- /dev/null
+++ b/website/docs/d/s3_directory_buckets.html.markdown
@@ -0,0 +1,28 @@
+---
+subcategory: "S3 (Simple Storage)"
+layout: "aws"
+page_title: "AWS: aws_s3_directory_buckets"
+description: |-
+  Lists Amazon S3 Express directory buckets.
+---
+
+# Data Source: aws_s3_directory_buckets
+
+Lists Amazon S3 Express directory buckets.
+
+## Example Usage
+
+```terraform
+data "aws_s3_directory_buckets" "example" {}
+```
+
+## Argument Reference
+
+There are no arguments available for this data source.
+
+## Attribute Reference
+
+This data source exports the following attributes:
+
+* `arns` - Bucket ARNs.
+* `buckets` - Bucket names.
diff --git a/website/docs/r/s3_access_point.html.markdown b/website/docs/r/s3_access_point.html.markdown
index da8c71d5e94..7b7c666e9f4 100644
--- a/website/docs/r/s3_access_point.html.markdown
+++ b/website/docs/r/s3_access_point.html.markdown
@@ -14,9 +14,11 @@ Provides a resource to manage an S3 Access Point.
 
 -> Advanced usage: To use a custom API endpoint for this Terraform resource, use the [`s3control` endpoint provider configuration](/docs/providers/aws/index.html#s3control), not the `s3` endpoint provider configuration.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
-### AWS Partition Bucket
+### AWS Partition General Purpose Bucket
 
 ```terraform
 resource "aws_s3_bucket" "example" {
@@ -55,7 +57,7 @@ resource "aws_vpc" "example" {
 
 The following arguments are required:
 
-* `bucket` - (Required) Name of an AWS Partition S3 Bucket or the ARN of S3 on Outposts Bucket that you want to associate this access point with.
+* `bucket` - (Required) Name of an AWS Partition S3 General Purpose Bucket or the ARN of S3 on Outposts Bucket that you want to associate this access point with.
 * `name` - (Required) Name you want to assign to this access point.
 
 The following arguments are optional:
diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown
index 043c5eaad96..89212e1a4d9 100644
--- a/website/docs/r/s3_bucket.html.markdown
+++ b/website/docs/r/s3_bucket.html.markdown
@@ -10,9 +10,7 @@
 
 Provides a S3 bucket resource.
 
--> This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the [`aws_s3control_bucket`](/docs/providers/aws/r/s3control_bucket.html) resource.
-
--> In April 2023, [AWS introduced](https://aws.amazon.com/about-aws/whats-new/2022/12/amazon-s3-automatically-enable-block-public-access-disable-access-control-lists-buckets-april-2023/) updated security defaults for new S3 buckets. See [this issue](https://github.com/hashicorp/terraform-provider-aws/issues/28353) for a information on how this affects the `aws_s3_bucket` resource.
+-> This resource provides functionality for managing S3 general purpose buckets in an AWS Partition. To manage Amazon S3 Express directory buckets, use the [`aws_s3_directory_bucket`](/docs/providers/aws/r/s3_directory_bucket.html) resource. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), use the [`aws_s3control_bucket`](/docs/providers/aws/r/s3control_bucket.html) resource.
 
 ## Example Usage
 
@@ -29,56 +27,11 @@ resource "aws_s3_bucket" "example" {
 }
 ```
 
-### Static Website Hosting
-
--> **NOTE:** The `website` attribute is deprecated.
-See [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) for examples with static website hosting configured.
-
-### CORS Rules
-
--> **NOTE:** The `cors_rule` attribute is deprecated.
-See [`aws_s3_bucket_cors_configuration`](s3_bucket_cors_configuration.html.markdown) for examples with CORS rules configured.
-
-### Versioning
-
--> **NOTE:** The `versioning` attribute is deprecated.
-See [`aws_s3_bucket_versioning`](s3_bucket_versioning.html.markdown) for examples with versioning configured.
-
-### Logging
-
--> **NOTE:** The `logging` attribute is deprecated.
-See [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) for examples with logging enabled.
-
-### Object Lifecycle Rules
-
--> **NOTE:** The `lifecycle_rule` attribute is deprecated.
-See [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html.markdown) for examples with object lifecycle rules.
-
-### Object Lock Configuration
-
--> **NOTE:** The `object_lock_configuration` attribute is deprecated.
-See [`aws_s3_bucket_object_lock_configuration`](s3_bucket_object_lock_configuration.html.markdown) for examples with object lock configurations on both new and existing buckets.
-
-### Replication Configuration
-
--> **NOTE:** The `replication_configuration` attribute is deprecated.
-See [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html.markdown) for examples with replication configured.
-
-### Enable SSE-KMS Server Side Encryption
-
--> **NOTE:** The `server_side_encryption_configuration` attribute is deprecated.
-See [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html.markdown) for examples with server side encryption configured.
-
-### ACL Policy Grants
-
--> **NOTE:** The `acl` and `grant` attributes are deprecated.
-See [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) for examples with ACL grants.
-
 ## Argument Reference
 
 This resource supports the following arguments:
 
-* `bucket` - (Optional, Forces new resource) Name of the bucket. If omitted, Terraform will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
+* `bucket` - (Optional, Forces new resource) Name of the bucket. If omitted, Terraform will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). The name must not be in the format `[bucket_name]--[azid]--x-s3`. Use the [`aws_s3_directory_bucket`](s3_directory_bucket.html) resource to manage S3 Express buckets.
 * `bucket_prefix` - (Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
 * `force_destroy` - (Optional, Default:`false`) Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation.
 * `object_lock_enabled` - (Optional, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions.
diff --git a/website/docs/r/s3_bucket_accelerate_configuration.html.markdown b/website/docs/r/s3_bucket_accelerate_configuration.html.markdown
index e22baf5c8e8..abec1ae72f8 100644
--- a/website/docs/r/s3_bucket_accelerate_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_accelerate_configuration.html.markdown
@@ -10,6 +10,8 @@ Provides an S3 bucket accelerate configuration resource.
 See the [Requirements for using Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration.html#transfer-acceleration-requirements) for more details.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ```terraform
diff --git a/website/docs/r/s3_bucket_acl.html.markdown b/website/docs/r/s3_bucket_acl.html.markdown
index f8ee66d2a98..7154c39749c 100644
--- a/website/docs/r/s3_bucket_acl.html.markdown
+++ b/website/docs/r/s3_bucket_acl.html.markdown
@@ -12,6 +12,8 @@ Provides an S3 bucket ACL resource.
 
 ~> **Note:** `terraform destroy` does not delete the S3 Bucket ACL but does remove the resource from Terraform state.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### With `private` ACL
diff --git a/website/docs/r/s3_bucket_analytics_configuration.html.markdown b/website/docs/r/s3_bucket_analytics_configuration.html.markdown
index 1a7320cd18a..d4498dc906c 100644
--- a/website/docs/r/s3_bucket_analytics_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_analytics_configuration.html.markdown
@@ -10,6 +10,8 @@
 Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket
diff --git a/website/docs/r/s3_bucket_cors_configuration.html.markdown b/website/docs/r/s3_bucket_cors_configuration.html.markdown
index 8f8c05c1089..6331ffc6718 100644
--- a/website/docs/r/s3_bucket_cors_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_cors_configuration.html.markdown
@@ -12,6 +12,8 @@ Provides an S3 bucket CORS configuration resource. For more information about CO
 
 ~> **NOTE:** S3 Buckets only support a single CORS configuration. Declaring multiple `aws_s3_bucket_cors_configuration` resources to the same S3 Bucket will cause a perpetual difference in configuration.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ```terraform
diff --git a/website/docs/r/s3_bucket_intelligent_tiering_configuration.html.markdown b/website/docs/r/s3_bucket_intelligent_tiering_configuration.html.markdown
index feb60c30a1f..25482ee74a3 100644
--- a/website/docs/r/s3_bucket_intelligent_tiering_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_intelligent_tiering_configuration.html.markdown
@@ -10,6 +10,8 @@
 Provides an [S3 Intelligent-Tiering](https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering.html) configuration resource.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### Add intelligent tiering configuration for entire S3 bucket
diff --git a/website/docs/r/s3_bucket_inventory.html.markdown b/website/docs/r/s3_bucket_inventory.html.markdown
index 219c12e7ea3..509666ccea6 100644
--- a/website/docs/r/s3_bucket_inventory.html.markdown
+++ b/website/docs/r/s3_bucket_inventory.html.markdown
@@ -10,6 +10,8 @@
 Provides a S3 bucket [inventory configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) resource.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### Add inventory configuration
diff --git a/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown b/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown
index 2ef75b3fffc..5474765ce68 100644
--- a/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown
@@ -24,6 +24,8 @@ For more information see the Amazon S3 User Guide on [`Lifecycle Configuration E
 
 Running Terraform operations shortly after creating a lifecycle configuration may result in changes that affect configuration idempotence. See the Amazon S3 User Guide on [setting lifecycle configuration on a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-set-lifecycle-configuration-intro.html).
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### With neither a filter nor prefix specified
diff --git a/website/docs/r/s3_bucket_logging.html.markdown b/website/docs/r/s3_bucket_logging.html.markdown
index 69b9a42af11..9a584207786 100644
--- a/website/docs/r/s3_bucket_logging.html.markdown
+++ b/website/docs/r/s3_bucket_logging.html.markdown
@@ -14,6 +14,8 @@ in the AWS S3 User Guide.
 
 ~> **Note:** Amazon S3 supports server access logging, AWS CloudTrail, or a combination of both. Refer to the [Logging options for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/logging-with-S3.html) to decide which method meets your requirements.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ```terraform
diff --git a/website/docs/r/s3_bucket_metric.html.markdown b/website/docs/r/s3_bucket_metric.html.markdown
index 5775d8c1694..76fc89a017c 100644
--- a/website/docs/r/s3_bucket_metric.html.markdown
+++ b/website/docs/r/s3_bucket_metric.html.markdown
@@ -10,6 +10,8 @@
 Provides a S3 bucket [metrics configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/metrics-configurations.html) resource.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### Add metrics configuration for entire S3 bucket
diff --git a/website/docs/r/s3_bucket_notification.html.markdown b/website/docs/r/s3_bucket_notification.html.markdown
index c4bb1b596c0..775cb54eff1 100644
--- a/website/docs/r/s3_bucket_notification.html.markdown
+++ b/website/docs/r/s3_bucket_notification.html.markdown
@@ -12,6 +12,8 @@ Manages a S3 Bucket Notification Configuration. For additional information, see
 
 ~> **NOTE:** S3 Buckets only support a single notification configuration. Declaring multiple `aws_s3_bucket_notification` resources to the same S3 Bucket will cause a perpetual difference in configuration. See the example "Trigger multiple Lambda functions" for an option.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### Add notification configuration to SNS Topic
diff --git a/website/docs/r/s3_bucket_object_lock_configuration.html.markdown b/website/docs/r/s3_bucket_object_lock_configuration.html.markdown
index 5d0ef168df6..c956b9c37dd 100644
--- a/website/docs/r/s3_bucket_object_lock_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_object_lock_configuration.html.markdown
@@ -14,6 +14,8 @@ Provides an S3 bucket Object Lock configuration resource. For more information a
For more information a Thus, to **enable** Object Lock for a **new** bucket, see the [Using object lock configuration](s3_bucket.html.markdown#using-object-lock-configuration) section in the `aws_s3_bucket` resource or the [Object Lock configuration for a new bucket](#object-lock-configuration-for-a-new-bucket) example below. If you want to **enable** Object Lock for an **existing** bucket, contact AWS Support and see the [Object Lock configuration for an existing bucket](#object-lock-configuration-for-an-existing-bucket) example below. +-> This resource cannot be used with S3 directory buckets. + ## Example Usage ### Object Lock configuration for a new bucket diff --git a/website/docs/r/s3_bucket_ownership_controls.html.markdown b/website/docs/r/s3_bucket_ownership_controls.html.markdown index 042e8e50836..fb0c296cdb0 100644 --- a/website/docs/r/s3_bucket_ownership_controls.html.markdown +++ b/website/docs/r/s3_bucket_ownership_controls.html.markdown @@ -10,6 +10,8 @@ description: |- Provides a resource to manage S3 Bucket Ownership Controls. For more information, see the [S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +-> This resource cannot be used with S3 directory buckets. + ## Example Usage ```terraform diff --git a/website/docs/r/s3_bucket_policy.html.markdown b/website/docs/r/s3_bucket_policy.html.markdown index e9fcffd7d0a..f7c6de751b1 100644 --- a/website/docs/r/s3_bucket_policy.html.markdown +++ b/website/docs/r/s3_bucket_policy.html.markdown @@ -10,6 +10,8 @@ description: |- Attaches a policy to an S3 bucket resource. +-> Policies can be attached to both S3 general purpose buckets and S3 directory buckets. + ## Example Usage ### Basic Usage diff --git a/website/docs/r/s3_bucket_public_access_block.html.markdown b/website/docs/r/s3_bucket_public_access_block.html.markdown index e8a4087fbdf..3c4adf3e0b3 100644 --- a/website/docs/r/s3_bucket_public_access_block.html.markdown +++ b/website/docs/r/s3_bucket_public_access_block.html.markdown @@ -10,6 +10,8 @@ description: |- Manages S3 bucket-level Public Access Block configuration. For more information about these settings, see the [AWS S3 Block Public Access documentation](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). +-> This resource cannot be used with S3 directory buckets. + ## Example Usage ```terraform diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index de41dc3ef42..a80852137aa 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -12,6 +12,8 @@ Provides an independent configuration resource for S3 bucket [replication config ~> **NOTE:** S3 Buckets only support a single replication configuration. Declaring multiple `aws_s3_bucket_replication_configuration` resources to the same S3 Bucket will cause a perpetual difference in configuration. +-> This resource cannot be used with S3 directory buckets. 
+
 ## Example Usage
 
 ### Using replication configuration
diff --git a/website/docs/r/s3_bucket_request_payment_configuration.html.markdown b/website/docs/r/s3_bucket_request_payment_configuration.html.markdown
index d49b95ef4f2..3840cd89fa3 100644
--- a/website/docs/r/s3_bucket_request_payment_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_request_payment_configuration.html.markdown
@@ -12,6 +12,8 @@ Provides an S3 bucket request payment configuration resource. For more informati
 
 ~> **NOTE:** Destroying an `aws_s3_bucket_request_payment_configuration` resource resets the bucket's `payer` to the S3 default: the bucket owner.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ```terraform
diff --git a/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown b/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown
index c42192abc19..5cbb5c45f8a 100644
--- a/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown
@@ -12,6 +12,8 @@ Provides a S3 bucket server-side encryption configuration resource.
 
 ~> **NOTE:** Destroying an `aws_s3_bucket_server_side_encryption_configuration` resource resets the bucket to [Amazon S3 bucket default encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html).
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ```terraform
diff --git a/website/docs/r/s3_bucket_versioning.html.markdown b/website/docs/r/s3_bucket_versioning.html.markdown
index dff0f5a199a..a2190d0167f 100644
--- a/website/docs/r/s3_bucket_versioning.html.markdown
+++ b/website/docs/r/s3_bucket_versioning.html.markdown
@@ -16,6 +16,8 @@ For more information, see [How S3 versioning works](https://docs.aws.amazon.com/
 
 ~> **NOTE:** If you are enabling versioning on the bucket for the first time, AWS recommends that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### With Versioning Enabled
diff --git a/website/docs/r/s3_bucket_website_configuration.html.markdown b/website/docs/r/s3_bucket_website_configuration.html.markdown
index 31580d55077..712a6b05ea4 100644
--- a/website/docs/r/s3_bucket_website_configuration.html.markdown
+++ b/website/docs/r/s3_bucket_website_configuration.html.markdown
@@ -10,6 +10,8 @@
 Provides an S3 bucket website configuration resource. For more information, see [Hosting Websites on S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### With `routing_rule` configured
diff --git a/website/docs/r/s3_directory_bucket.html.markdown b/website/docs/r/s3_directory_bucket.html.markdown
new file mode 100644
index 00000000000..72268552922
--- /dev/null
+++ b/website/docs/r/s3_directory_bucket.html.markdown
@@ -0,0 +1,64 @@
+---
+subcategory: "S3 (Simple Storage)"
+layout: "aws"
+page_title: "AWS: aws_s3_directory_bucket"
+description: |-
+  Provides an Amazon S3 Express directory bucket resource.
+---
+
+# Resource: aws_s3_directory_bucket
+
+Provides an Amazon S3 Express directory bucket resource.
+
+## Example Usage
+
+```terraform
+resource "aws_s3_directory_bucket" "example" {
+  bucket = "example--usw2-az1--x-s3"
+
+  location {
+    name = "usw2-az1"
+  }
+}
+```
+
+## Argument Reference
+
+This resource supports the following arguments:
+
+* `bucket` - (Required) Name of the bucket. The name must be in the format `[bucket_name]--[azid]--x-s3`. Use the [`aws_s3_bucket`](s3_bucket.html) resource to manage general purpose buckets.
+* `data_redundancy` - (Optional, Default:`SingleAvailabilityZone`) Data redundancy. Valid values: `SingleAvailabilityZone`.
+* `force_destroy` - (Optional, Default:`false`) Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation.
+* `location` - (Required) Bucket location. See [Location](#location) below for more details.
+* `type` - (Optional, Default:`Directory`) Bucket type. Valid values: `Directory`.
+
+### Location
+
+The `location` block supports the following:
+
+* `name` - (Required) [Availability Zone ID](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#az-ids).
+* `type` - (Optional, Default:`AvailabilityZone`) Location type. Valid values: `AvailabilityZone`.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `id` - Name of the bucket.
+* `arn` - ARN of the bucket.
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an Amazon S3 Express directory bucket using `bucket`. For example:
+
+```terraform
+import {
+  to = aws_s3_directory_bucket.example
+  id = "example--usw2-az1--x-s3"
+}
+```
+
+Using `terraform import`, import an Amazon S3 Express directory bucket using `bucket`. For example:
+
+```console
+% terraform import aws_s3_directory_bucket.example example--usw2-az1--x-s3
+```
diff --git a/website/docs/r/s3_object.html.markdown b/website/docs/r/s3_object.html.markdown
index 58b8d6b4979..2ebd5460916 100644
--- a/website/docs/r/s3_object.html.markdown
+++ b/website/docs/r/s3_object.html.markdown
@@ -134,6 +134,8 @@ resource "aws_s3_object" "examplebucket_object" {
 
 S3 objects support a [maximum of 10 tags](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html). If the resource's own `tags` and the provider-level `default_tags` would together lead to more than 10 tags on an S3 object, use the `override_provider` configuration block to suppress any provider-level `default_tags`.
 
+-> S3 objects stored in Amazon S3 Express directory buckets do not support tags, so any provider-level `default_tags` must be suppressed, as shown below.
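+
+For instance, the following sketch (the directory bucket name and source file are placeholders) stores an object in a directory bucket while emptying any provider-level `default_tags`:
+
+```terraform
+resource "aws_s3_directory_bucket" "example" {
+  bucket = "example--usw2-az1--x-s3"
+
+  location {
+    name = "usw2-az1"
+  }
+}
+
+resource "aws_s3_object" "example" {
+  bucket = aws_s3_directory_bucket.example.bucket
+  key    = "example"
+  source = "example.txt"
+
+  # Directory bucket objects cannot carry tags, so clear any inherited
+  # provider-level default_tags for this resource only.
+  override_provider {
+    default_tags {
+      tags = {}
+    }
+  }
+}
+```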
+
 ```terraform
 resource "aws_s3_bucket" "examplebucket" {
   bucket = "examplebuckettftest"
diff --git a/website/docs/r/s3control_multi_region_access_point.html.markdown b/website/docs/r/s3control_multi_region_access_point.html.markdown
index 3d0563b4c6c..716067a2599 100644
--- a/website/docs/r/s3control_multi_region_access_point.html.markdown
+++ b/website/docs/r/s3control_multi_region_access_point.html.markdown
@@ -10,6 +10,8 @@
 Provides a resource to manage an S3 Multi-Region Access Point associated with specified buckets.
 
+-> This resource cannot be used with S3 directory buckets.
+
 ## Example Usage
 
 ### Multiple AWS Buckets in Different Regions