diff --git a/.changelog/33358.txt b/.changelog/33358.txt new file mode 100644 index 00000000000..1aad7087d09 --- /dev/null +++ b/.changelog/33358.txt @@ -0,0 +1,15 @@ +```release-note:enhancement +resource/aws_s3_object: Add `checksum_algorithm` argument and `checksum_crc32`, `checksum_crc32c`, `checksum_sha1` and `checksum_sha256` attributes +``` + +```release-note:enhancement +resource/aws_s3_object_copy: Add `checksum_algorithm` argument and `checksum_crc32`, `checksum_crc32c`, `checksum_sha1` and `checksum_sha256` attributes +``` + +```release-note:enhancement +data-source/aws_s3_object: Add `checksum_mode` argument and `checksum_crc32`, `checksum_crc32c`, `checksum_sha1` and `checksum_sha256` attributes +``` + +```release-note:note +data-source/aws_s3_object: Migration to [AWS SDK for Go v2](https://aws.github.io/aws-sdk-go-v2/) means that the edge case of specifying a single `/` as the value for `key` is no longer supported +``` \ No newline at end of file diff --git a/go.mod b/go.mod index 50eec275841..dc2c1385351 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,9 @@ require ( github.com/YakDriver/regexache v0.23.0 github.com/aws/aws-sdk-go v1.45.7 github.com/aws/aws-sdk-go-v2 v1.21.0 + github.com/aws/aws-sdk-go-v2/config v1.18.39 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83 github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.20.5 github.com/aws/aws-sdk-go-v2/service/account v1.11.5 github.com/aws/aws-sdk-go-v2/service/acm v1.18.5 @@ -113,11 +115,10 @@ require ( github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect - github.com/aws/aws-sdk-go-v2/config v1.18.33 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.32 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.37 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.39 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 // indirect github.com/aws/aws-sdk-go-v2/service/iam v1.22.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 // indirect @@ -125,8 +126,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.35 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.13.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 // indirect github.com/aws/smithy-go v1.14.2 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect diff --git a/go.sum b/go.sum index f08c149900d..aa10863654d 100644 --- a/go.sum +++ b/go.sum @@ -24,26 +24,24 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.45.7 h1:k4QsvWZhm8409TYeRuTV1P6+j3lLKoe+giFA/j3VAps= github.com/aws/aws-sdk-go v1.45.7/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.20.1/go.mod h1:NU06lETsFm8fUC6ZjhgDpVBcGZTFQ6XM+LZWZxMI4ac= github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod 
h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= -github.com/aws/aws-sdk-go-v2/config v1.18.33 h1:JKcw5SFxFW/rpM4mOPjv0VQ11E2kxW13F3exWOy7VZU= -github.com/aws/aws-sdk-go-v2/config v1.18.33/go.mod h1:hXO/l9pgY3K5oZJldamP0pbZHdPqqk+4/maa7DSD3cA= -github.com/aws/aws-sdk-go-v2/credentials v1.13.32 h1:lIH1eKPcCY1ylR4B6PkBGRWMHO3aVenOKJHWiS4/G2w= -github.com/aws/aws-sdk-go-v2/credentials v1.13.32/go.mod h1:lL8U3v/Y79YRG69WlAho0OHIKUXCyFvSXaIvfo81sls= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.8/go.mod h1:ce7BgLQfYr5hQFdy67oX2svto3ufGtm6oBvmsHScI1Q= +github.com/aws/aws-sdk-go-v2/config v1.18.39 h1:oPVyh6fuu/u4OiW4qcuQyEtk7U7uuNBmHmJSLg1AJsQ= +github.com/aws/aws-sdk-go-v2/config v1.18.39/go.mod h1:+NH/ZigdPckFpgB1TRcRuWCB/Kbbvkxc/iNAKTq5RhE= +github.com/aws/aws-sdk-go-v2/credentials v1.13.37 h1:BvEdm09+ZEh2XtN+PVHPcYwKY3wIeB6pw7vPRM4M9/U= +github.com/aws/aws-sdk-go-v2/credentials v1.13.37/go.mod h1:ACLrdkd4CLZyXOghZ8IYumQbcooAcp2jo/s2xsFH8IM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.38/go.mod h1:qggunOChCMu9ZF/UkAfhTz25+U2rLVb3ya0Ua6TTfCA= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83 h1:wcluDLIQ0uYaxv0fCWQRimbXkPdTgWHUD21j1CzXEwc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83/go.mod h1:nGCBuon134gW67yAtxHKV73x+tAcY/xG4ZPNPDB1h/I= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.32/go.mod h1:0ZXSqrty4FtQ7p8TEuRde/SZm9X05KT18LAUlR40Ln0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.39 h1:fc0ukRAiP1syoSGZYu+DaE+FulSYhTiJ8WpVu5jElU4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.39/go.mod h1:WLAW8PT7+JhjZfLSWe7WEJaJu0GNo0cKc2Zyo003RBs= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.20.5 h1:1w0ELQMC3AptxEFS4A+vJuhyIuC9IoNN2YxNKK5pSYQ= @@ -100,7 +98,6 @@ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjq github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.35 h1:UKjpIDLVF90RfV88XurdduMoTxPqtGHZMIDYZQM7RO4= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.35/go.mod h1:B3dUg0V6eJesUTi+m27NUkj7n8hdDKYUpxj8f4+TqaQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.32/go.mod h1:4jwAWKEkCR0anWk5+1RbfSg1R5Gzld7NLiuaq5bTR/Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= @@ -165,13 +162,10 @@ github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.16.5 h1:kt2JpBjKnG2GfiHJU0es github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.16.5/go.mod h1:g6xJdpynIx7D1UW9te8ul36qWGyuzIL6ATrJF6E6ygI= 
github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.22.5 h1:1PesErC0GN25MaKtBju52HlJOXtLeFoAsOxAgHhEoCk= github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.22.5/go.mod h1:11Z2L2mDhJbRZo5rwRs1NPz1Vi37U5N1EiaazEoBGag= -github.com/aws/aws-sdk-go-v2/service/sso v1.13.2/go.mod h1:ju+nNXUunfIFamXUIZQiICjnO/TPlOmWcYhZcSy7xaE= -github.com/aws/aws-sdk-go-v2/service/sso v1.13.5 h1:oCvTFSDi67AX0pOX3PuPdGFewvLRU2zzFSrTsgURNo0= -github.com/aws/aws-sdk-go-v2/service/sso v1.13.5/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.2/go.mod h1:ubDBBaDFs1GHijSOTi8ljppML15GLG0HxhILtbjNNYQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5 h1:dnInJb4S0oy8aQuri1mV6ipLlnZPfnsDNB9BGO9PDNY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= -github.com/aws/aws-sdk-go-v2/service/sts v1.21.2/go.mod h1:FQ/DQcOfESELfJi5ED+IPPAjI5xC6nxtSolVVB773jM= +github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 h1:2PylFCfKCEDv6PeSN09pC/VUiRd10wi1VfHG5FrW0/g= +github.com/aws/aws-sdk-go-v2/service/sso v1.13.6/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6 h1:pSB560BbVj9ZlJZF4WYj5zsytWHWKxg+NgyGV4B2L58= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 h1:CQBFElb0LS8RojMJlxRSo/HXipvTZW2S44Lt9Mk2aYQ= github.com/aws/aws-sdk-go-v2/service/sts v1.21.5/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= github.com/aws/aws-sdk-go-v2/service/swf v1.17.3 h1:E2i7UVmrS7D+RqvOHdv/6pag549LNrR+W8x8z+fwFWo= @@ -188,7 +182,6 @@ github.com/aws/aws-sdk-go-v2/service/workspaces v1.30.0 h1:1GTubIlhB61KhkJAyCU6W github.com/aws/aws-sdk-go-v2/service/workspaces v1.30.0/go.mod h1:AVjfc8q87mKUZgiW4NjqJgG1OzcFIO6OHyfkOQSrPSY= github.com/aws/aws-sdk-go-v2/service/xray v1.17.5 h1:fJ7KMcuZXBfmK0A8ZfMZIKle0/WuiZwOl+JDpR+LV4I= 
github.com/aws/aws-sdk-go-v2/service/xray v1.17.5/go.mod h1:aE2t25bCn8YrfL6faz73m5Q/7gKa25HjCoa+z6OQMG4= -github.com/aws/smithy-go v1.14.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw= diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index ef6d43bf95f..15bf9794b2f 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -58,6 +58,15 @@ func (client *AWSClient) RegionalHostname(prefix string) string { return fmt.Sprintf("%s.%s.%s", prefix, client.Region, client.DNSSuffix) } +// S3UsePathStyle returns the s3_force_path_style provider configuration value. +func (client *AWSClient) S3UsePathStyle() bool { + return client.s3UsePathStyle +} + +// **************** +// TODO: REVIEW +// TODO: AWS SDK for Go v2 does NO URL cleaning. 
+// **************** func (client *AWSClient) S3ConnURICleaningDisabled(ctx context.Context) *s3_sdkv1.S3 { config := client.S3Conn(ctx).Config config.DisableRestProtocolURICleaning = aws_sdkv1.Bool(true) diff --git a/internal/service/s3/bucket_object.go b/internal/service/s3/bucket_object.go index 4c5f6cc7407..bc696e299e6 100644 --- a/internal/service/s3/bucket_object.go +++ b/internal/service/s3/bucket_object.go @@ -206,6 +206,9 @@ func resourceBucketObjectCreate(ctx context.Context, d *schema.ResourceData, met } func resourceBucketObjectRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + const ( + objectCreationTimeout = 2 * time.Minute + ) var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Conn(ctx) @@ -349,7 +352,7 @@ func resourceBucketObjectUpdate(ctx context.Context, d *schema.ResourceData, met if d.HasChange("tags_all") { o, n := d.GetChange("tags_all") - if err := ObjectUpdateTags(ctx, conn, bucket, key, o, n); err != nil { + if err := ObjectUpdateTagsV1(ctx, conn, bucket, key, o, n); err != nil { return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Object (%s) tags: %s", bucket, key, err) } } @@ -359,7 +362,7 @@ func resourceBucketObjectUpdate(ctx context.Context, d *schema.ResourceData, met func resourceBucketObjectDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) key := d.Get("key").(string) @@ -370,7 +373,7 @@ func resourceBucketObjectDelete(ctx context.Context, d *schema.ResourceData, met var err error if _, ok := d.GetOk("version_id"); ok { - _, err = DeleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) + _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) } else { err = deleteObjectVersion(ctx, conn, bucket, key, "", false) } 
diff --git a/internal/service/s3/bucket_object_data_source_test.go b/internal/service/s3/bucket_object_data_source_test.go index b12e172c472..d7ffe9399b3 100644 --- a/internal/service/s3/bucket_object_data_source_test.go +++ b/internal/service/s3/bucket_object_data_source_test.go @@ -23,9 +23,6 @@ func TestAccS3BucketObjectDataSource_basic(t *testing.T) { ctx := acctest.Context(t) rInt := sdkacctest.RandInt() - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -38,8 +35,6 @@ func TestAccS3BucketObjectDataSource_basic(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_basic(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -56,7 +51,6 @@ func TestAccS3BucketObjectDataSource_basic(t *testing.T) { func TestAccS3BucketObjectDataSource_basicViaAccessPoint(t *testing.T) { ctx := acctest.Context(t) - var dsObj, rObj s3.GetObjectOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_s3_object.test" @@ -71,9 +65,6 @@ func TestAccS3BucketObjectDataSource_basicViaAccessPoint(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_basicViaAccessPoint(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), - testAccCheckObjectExists(ctx, resourceName, &rObj), resource.TestCheckResourceAttrPair(dataSourceName, "bucket", accessPointResourceName, "arn"), resource.TestCheckResourceAttrPair(dataSourceName, "key", resourceName, "key"), ), @@ -86,9 +77,6 @@ 
func TestAccS3BucketObjectDataSource_readableBody(t *testing.T) { ctx := acctest.Context(t) rInt := sdkacctest.RandInt() - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -101,8 +89,6 @@ func TestAccS3BucketObjectDataSource_readableBody(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_readableBody(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -121,9 +107,6 @@ func TestAccS3BucketObjectDataSource_kmsEncrypted(t *testing.T) { ctx := acctest.Context(t) rInt := sdkacctest.RandInt() - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -136,8 +119,6 @@ func TestAccS3BucketObjectDataSource_kmsEncrypted(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_kmsEncrypted(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "22"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -158,9 +139,6 @@ func TestAccS3BucketObjectDataSource_bucketKeyEnabled(t *testing.T) { ctx := acctest.Context(t) rInt := sdkacctest.RandInt() - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -173,8 +151,6 @@ func 
TestAccS3BucketObjectDataSource_bucketKeyEnabled(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_keyEnabled(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "22"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -196,9 +172,6 @@ func TestAccS3BucketObjectDataSource_allParams(t *testing.T) { ctx := acctest.Context(t) rInt := sdkacctest.RandInt() - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -211,8 +184,6 @@ func TestAccS3BucketObjectDataSource_allParams(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_allParams(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "25"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -248,9 +219,6 @@ func TestAccS3BucketObjectDataSource_objectLockLegalHoldOff(t *testing.T) { ctx := acctest.Context(t) rInt := sdkacctest.RandInt() - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -263,8 +231,6 @@ func TestAccS3BucketObjectDataSource_objectLockLegalHoldOff(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_lockLegalHoldOff(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, 
&dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -284,9 +250,6 @@ func TestAccS3BucketObjectDataSource_objectLockLegalHoldOn(t *testing.T) { rInt := sdkacctest.RandInt() retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - resourceName := "aws_s3_object.object" dataSourceName := "data.aws_s3_object.obj" @@ -299,8 +262,6 @@ func TestAccS3BucketObjectDataSource_objectLockLegalHoldOn(t *testing.T) { { Config: testAccBucketObjectDataSourceConfig_lockLegalHoldOn(rInt, retainUntilDate), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -317,8 +278,6 @@ func TestAccS3BucketObjectDataSource_objectLockLegalHoldOn(t *testing.T) { func TestAccS3BucketObjectDataSource_leadingSlash(t *testing.T) { ctx := acctest.Context(t) - var rObj s3.GetObjectOutput - var dsObj1, dsObj2, dsObj3 s3.GetObjectOutput resourceName := "aws_s3_object.object" dataSourceName1 := "data.aws_s3_object.obj1" @@ -336,28 +295,22 @@ func TestAccS3BucketObjectDataSource_leadingSlash(t *testing.T) { Steps: []resource.TestStep{ { // nosemgrep:ci.test-config-funcs-correct-form Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - ), }, { // nosemgrep:ci.test-config-funcs-correct-form Config: conf, Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExistsDataSource(ctx, dataSourceName1, 
&dsObj1), resource.TestCheckResourceAttr(dataSourceName1, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName1, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName1, "etag", resourceName, "etag"), resource.TestMatchResourceAttr(dataSourceName1, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), resource.TestCheckResourceAttr(dataSourceName1, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName2, &dsObj2), resource.TestCheckResourceAttr(dataSourceName2, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName2, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName2, "etag", resourceName, "etag"), resource.TestMatchResourceAttr(dataSourceName2, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), resource.TestCheckResourceAttr(dataSourceName2, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName3, &dsObj3), resource.TestCheckResourceAttr(dataSourceName3, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName3, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName3, "etag", resourceName, "etag"), @@ -371,8 +324,6 @@ func TestAccS3BucketObjectDataSource_leadingSlash(t *testing.T) { func TestAccS3BucketObjectDataSource_multipleSlashes(t *testing.T) { ctx := acctest.Context(t) - var rObj1, rObj2 s3.GetObjectOutput - var dsObj1, dsObj2, dsObj3 s3.GetObjectOutput resourceName1 := "aws_s3_object.object1" resourceName2 := "aws_s3_object.object2" @@ -391,26 +342,18 @@ func TestAccS3BucketObjectDataSource_multipleSlashes(t *testing.T) { Steps: []resource.TestStep{ { // nosemgrep:ci.test-config-funcs-correct-form Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName1, &rObj1), - testAccCheckObjectExists(ctx, resourceName2, &rObj2), - ), }, { // 
nosemgrep:ci.test-config-funcs-correct-form Config: conf, Check: resource.ComposeTestCheckFunc( - - testAccCheckObjectExistsDataSource(ctx, dataSourceName1, &dsObj1), resource.TestCheckResourceAttr(dataSourceName1, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName1, "content_type", resourceName1, "content_type"), resource.TestCheckResourceAttr(dataSourceName1, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName2, &dsObj2), resource.TestCheckResourceAttr(dataSourceName2, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName2, "content_type", resourceName1, "content_type"), resource.TestCheckResourceAttr(dataSourceName2, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName3, &dsObj3), resource.TestCheckResourceAttr(dataSourceName3, "content_length", "2"), resource.TestCheckResourceAttrPair(dataSourceName3, "content_type", resourceName2, "content_type"), resource.TestCheckResourceAttr(dataSourceName3, "body", "no"), @@ -422,8 +365,6 @@ func TestAccS3BucketObjectDataSource_multipleSlashes(t *testing.T) { func TestAccS3BucketObjectDataSource_singleSlashAsKey(t *testing.T) { ctx := acctest.Context(t) - var dsObj s3.GetObjectOutput - dataSourceName := "data.aws_s3_object.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -434,9 +375,6 @@ func TestAccS3BucketObjectDataSource_singleSlashAsKey(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccBucketObjectDataSourceConfig_singleSlashAsKey(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), - ), }, }, }) diff --git a/internal/service/s3/bucket_object_test.go b/internal/service/s3/bucket_object_test.go index de8fe87b00f..81ac525f25c 100644 --- a/internal/service/s3/bucket_object_test.go +++ b/internal/service/s3/bucket_object_test.go @@ -1314,7 +1314,7 @@ func TestAccS3BucketObject_ignoreTags(t *testing.T) { 
Check: resource.ComposeTestCheckFunc( testAccCheckBucketObjectExists(ctx, resourceName, &obj), testAccCheckBucketObjectBody(&obj, "stuff"), - testAccCheckBucketObjectUpdateTags(ctx, resourceName, nil, map[string]string{"ignorekey1": "ignorevalue1"}), + testAccCheckBucketObjectUpdateTagsV1(ctx, resourceName, nil, map[string]string{"ignorekey1": "ignorevalue1"}), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), testAccCheckBucketObjectCheckTags(ctx, resourceName, map[string]string{ "ignorekey1": "ignorevalue1", @@ -1568,12 +1568,12 @@ func testAccBucketObjectCreateTempFile(t *testing.T, data string) string { return filename } -func testAccCheckBucketObjectUpdateTags(ctx context.Context, n string, oldTags, newTags map[string]string) resource.TestCheckFunc { +func testAccCheckBucketObjectUpdateTagsV1(ctx context.Context, n string, oldTags, newTags map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - return tfs3.ObjectUpdateTags(ctx, conn, rs.Primary.Attributes["bucket"], rs.Primary.Attributes["key"], oldTags, newTags) + return tfs3.ObjectUpdateTagsV1(ctx, conn, rs.Primary.Attributes["bucket"], rs.Primary.Attributes["key"], oldTags, newTags) } } diff --git a/internal/service/s3/delete_test.go b/internal/service/s3/delete_test.go index 0bbce64ca1b..e8c61d75e56 100644 --- a/internal/service/s3/delete_test.go +++ b/internal/service/s3/delete_test.go @@ -7,8 +7,10 @@ import ( "flag" "testing" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" + config_sdkv2 "github.com/aws/aws-sdk-go-v2/config" + s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" + session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" + s3_sdkv1 "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" ) @@ -27,8 +29,8 @@ 
func TestEmptyBucket(t *testing.T) { t.Skip("bucket not specified") } - sess := session.Must(session.NewSession()) - svc := s3.New(sess) + sess := session_sdkv1.Must(session_sdkv1.NewSession()) + svc := s3_sdkv1.New(sess) n, err := tfs3.EmptyBucket(ctx, svc, *bucket, *force) @@ -48,10 +50,13 @@ func TestDeleteAllObjectVersions(t *testing.T) { t.Skip("bucket not specified") } - sess := session.Must(session.NewSession()) - svc := s3.New(sess) + cfg, err := config_sdkv2.LoadDefaultConfig(ctx) + if err != nil { + t.Fatalf("error loading default SDK config: %s", err) + } - n, err := tfs3.DeleteAllObjectVersions(ctx, svc, *bucket, "", *force, false) + client := s3_sdkv2.NewFromConfig(cfg) + n, err := tfs3.DeleteAllObjectVersions(ctx, client, *bucket, "", *force, false) if err != nil { t.Fatalf("error emptying S3 bucket (%s): %s", *bucket, err) diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index 0be44a488b0..e01d3ce5e1b 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -13,10 +13,12 @@ const ( errCodeInvalidRequest = "InvalidRequest" errCodeMalformedPolicy = "MalformedPolicy" errCodeMethodNotAllowed = "MethodNotAllowed" + errCodeNoSuchBucket = "NoSuchBucket" ErrCodeNoSuchBucketPolicy = "NoSuchBucketPolicy" errCodeNoSuchConfiguration = "NoSuchConfiguration" ErrCodeNoSuchCORSConfiguration = "NoSuchCORSConfiguration" ErrCodeNoSuchLifecycleConfiguration = "NoSuchLifecycleConfiguration" + errCodeNoSuchKey = "NoSuchKey" ErrCodeNoSuchPublicAccessBlockConfiguration = "NoSuchPublicAccessBlockConfiguration" errCodeNoSuchTagSet = "NoSuchTagSet" errCodeNoSuchTagSetError = "NoSuchTagSetError" diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go new file mode 100644 index 00000000000..c8ecf70dfda --- /dev/null +++ b/internal/service/s3/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +// Exports for use in tests only. 
+var ( + DeleteAllObjectVersions = deleteAllObjectVersions + FindObjectByBucketAndKey = findObjectByBucketAndKey + SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey +) diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index 48b911eb835..da45e66d2d5 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -10,20 +10,24 @@ import ( "fmt" "io" "log" + "net/http" "os" "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/service/kms" @@ -34,8 +38,6 @@ import ( "github.com/mitchellh/go-homedir" ) -const objectCreationTimeout = 2 * time.Minute - // @SDKResource("aws_s3_object", name="Object") // @Tags func ResourceObject() *schema.Resource { @@ -56,10 +58,10 @@ func ResourceObject() *schema.Resource { Schema: map[string]*schema.Schema{ "acl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(s3.ObjectCannedACL_Values(), false), + Type: 
schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.ObjectCannedACL](), }, "bucket": { Type: schema.TypeString, @@ -76,6 +78,27 @@ func ResourceObject() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "checksum_algorithm": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ChecksumAlgorithm](), + }, + "checksum_crc32": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_crc32c": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_sha1": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_sha256": { + Type: schema.TypeString, + Computed: true, + }, "content": { Type: schema.TypeString, Optional: true, @@ -130,7 +153,7 @@ func ResourceObject() *schema.Resource { ValidateFunc: verify.ValidARN, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { // ignore diffs where the user hasn't specified a kms_key_id but the bucket has a default KMS key configured - if new == "" && d.Get("server_side_encryption") == s3.ServerSideEncryptionAwsKms { + if new == "" && d.Get("server_side_encryption") == types.ServerSideEncryptionAwsKms { return true } return false @@ -138,19 +161,19 @@ func ResourceObject() *schema.Resource { }, "metadata": { Type: schema.TypeMap, - ValidateFunc: validateMetadataIsLowerCase, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, + ValidateFunc: validateMetadataIsLowerCase, }, "object_lock_legal_hold_status": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.ObjectLockLegalHoldStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ObjectLockLegalHoldStatus](), }, "object_lock_mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.ObjectLockMode_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: 
enum.Validate[types.ObjectLockMode](), }, "object_lock_retain_until_date": { Type: schema.TypeString, @@ -158,10 +181,10 @@ func ResourceObject() *schema.Resource { ValidateFunc: validation.IsRFC3339Time, }, "server_side_encryption": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.ServerSideEncryption_Values(), false), - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.ServerSideEncryption](), }, "source": { Type: schema.TypeString, @@ -173,10 +196,10 @@ func ResourceObject() *schema.Resource { Optional: true, }, "storage_class": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(s3.ObjectStorageClass_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.ObjectStorageClass](), }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -199,13 +222,11 @@ func resourceObjectCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, objectCreationTimeout, func() (interface{}, error) { - return FindObjectByThreePartKeyV1(ctx, conn, bucket, key, "") - }, d.IsNewResource()) + key := sdkv1CompatibleCleanKey(d.Get("key").(string)) + output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string)) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Object (%s) not found, removing from state", d.Id()) @@ -217,60 +238,40 @@ func resourceObjectRead(ctx context.Context, d *schema.ResourceData, meta 
interf return sdkdiag.AppendErrorf(diags, "reading S3 Object (%s): %s", d.Id(), err) } - output := outputRaw.(*s3.HeadObjectOutput) - d.Set("bucket_key_enabled", output.BucketKeyEnabled) d.Set("cache_control", output.CacheControl) + d.Set("checksum_crc32", output.ChecksumCRC32) + d.Set("checksum_crc32c", output.ChecksumCRC32C) + d.Set("checksum_sha1", output.ChecksumSHA1) + d.Set("checksum_sha256", output.ChecksumSHA256) d.Set("content_disposition", output.ContentDisposition) d.Set("content_encoding", output.ContentEncoding) d.Set("content_language", output.ContentLanguage) d.Set("content_type", output.ContentType) - metadata := flex.PointersMapToStringList(output.Metadata) - - // AWS Go SDK capitalizes metadata, this is a workaround. https://github.com/aws/aws-sdk-go/issues/445 - for k, v := range metadata { - delete(metadata, k) - metadata[strings.ToLower(k)] = v - } - - if err := d.Set("metadata", metadata); err != nil { - return sdkdiag.AppendErrorf(diags, "setting metadata: %s", err) - } - d.Set("version_id", output.VersionId) - d.Set("server_side_encryption", output.ServerSideEncryption) - d.Set("website_redirect", output.WebsiteRedirectLocation) + // See https://forums.aws.amazon.com/thread.jspa?threadID=44003 + d.Set("etag", strings.Trim(aws.ToString(output.ETag), `"`)) + d.Set("metadata", output.Metadata) d.Set("object_lock_legal_hold_status", output.ObjectLockLegalHoldStatus) d.Set("object_lock_mode", output.ObjectLockMode) d.Set("object_lock_retain_until_date", flattenObjectDate(output.ObjectLockRetainUntilDate)) - - if err := resourceObjectSetKMS(ctx, d, meta, output.SSEKMSKeyId); err != nil { - return sdkdiag.AppendErrorf(diags, "object KMS: %s", err) - } - - // See https://forums.aws.amazon.com/thread.jspa?threadID=44003 - d.Set("etag", strings.Trim(aws.StringValue(output.ETag), `"`)) - + d.Set("server_side_encryption", output.ServerSideEncryption) // The "STANDARD" (which is also the default) storage // class when set would not be included in the 
results. - if output.StorageClass == nil { - d.Set("storage_class", s3.StorageClassStandard) - } else { + d.Set("storage_class", types.ObjectStorageClassStandard) + if output.StorageClass != "" { d.Set("storage_class", output.StorageClass) } + d.Set("version_id", output.VersionId) + d.Set("website_redirect", output.WebsiteRedirectLocation) - // Retry due to S3 eventual consistency - tagsRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return ObjectListTagsV1(ctx, conn, bucket, key) - }, s3.ErrCodeNoSuchBucket) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) + if err := resourceObjectSetKMS(ctx, d, meta, output.SSEKMSKeyId); err != nil { + return sdkdiag.AppendFromErr(diags, err) } - tags, ok := tagsRaw.(tftags.KeyValueTags) + tags, err := ObjectListTags(ctx, conn, bucket, key) - if !ok { - return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): unable to convert tags", bucket, key) + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) } setTagsOut(ctx, Tags(tags)) @@ -284,41 +285,47 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte return append(diags, resourceObjectUpload(ctx, d, meta)...) 
} - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) - key := d.Get("key").(string) + key := sdkv1CompatibleCleanKey(d.Get("key").(string)) if d.HasChange("acl") { - _, err := conn.PutObjectAclWithContext(ctx, &s3.PutObjectAclInput{ + input := &s3.PutObjectAclInput{ + ACL: types.ObjectCannedACL(d.Get("acl").(string)), Bucket: aws.String(bucket), Key: aws.String(key), - ACL: aws.String(d.Get("acl").(string)), - }) + } + + _, err := conn.PutObjectAcl(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "putting S3 object ACL: %s", err) + return sdkdiag.AppendErrorf(diags, "putting S3 Object (%s) ACL: %s", d.Id(), err) } } if d.HasChange("object_lock_legal_hold_status") { - _, err := conn.PutObjectLegalHoldWithContext(ctx, &s3.PutObjectLegalHoldInput{ + input := &s3.PutObjectLegalHoldInput{ Bucket: aws.String(bucket), Key: aws.String(key), - LegalHold: &s3.ObjectLockLegalHold{ - Status: aws.String(d.Get("object_lock_legal_hold_status").(string)), + LegalHold: &types.ObjectLockLegalHold{ + Status: types.ObjectLockLegalHoldStatus(d.Get("object_lock_legal_hold_status").(string)), }, - }) + } + + _, err := conn.PutObjectLegalHold(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "putting S3 object lock legal hold: %s", err) + return sdkdiag.AppendErrorf(diags, "putting S3 Object (%s) legal hold: %s", d.Id(), err) } } if d.HasChanges("object_lock_mode", "object_lock_retain_until_date") { - req := &s3.PutObjectRetentionInput{ + input := &s3.PutObjectRetentionInput{ Bucket: aws.String(bucket), Key: aws.String(key), - Retention: &s3.ObjectLockRetention{ - Mode: aws.String(d.Get("object_lock_mode").(string)), + Retention: &types.ObjectLockRetention{ + Mode: types.ObjectLockRetentionMode(d.Get("object_lock_mode").(string)), RetainUntilDate: expandObjectDate(d.Get("object_lock_retain_until_date").(string)), }, } @@ -326,16 +333,17 @@ func resourceObjectUpdate(ctx 
context.Context, d *schema.ResourceData, meta inte // Bypass required to lower or clear retain-until date. if d.HasChange("object_lock_retain_until_date") { oraw, nraw := d.GetChange("object_lock_retain_until_date") - o := expandObjectDate(oraw.(string)) - n := expandObjectDate(nraw.(string)) + o, n := expandObjectDate(oraw.(string)), expandObjectDate(nraw.(string)) + if n == nil || (o != nil && n.Before(*o)) { - req.BypassGovernanceRetention = aws.Bool(true) + input.BypassGovernanceRetention = true } } - _, err := conn.PutObjectRetentionWithContext(ctx, req) + _, err := conn.PutObjectRetention(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "putting S3 object lock retention: %s", err) + return sdkdiag.AppendErrorf(diags, "putting S3 Object (%s) retention: %s", d.Id(), err) } } @@ -352,18 +360,14 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - // We are effectively ignoring all leading '/'s in the key name and - // treating multiple '/'s as a single '/' as aws.Config.DisableRestProtocolURICleaning is false - key = strings.TrimLeft(key, "/") - key = regexache.MustCompile(`/+`).ReplaceAllString(key, "/") + key := sdkv1CompatibleCleanKey(d.Get("key").(string)) var err error if _, ok := d.GetOk("version_id"); ok { - _, err = DeleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) + _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) } else { err = deleteObjectVersion(ctx, conn, bucket, key, "", false) } @@ -396,8 +400,8 @@ func resourceObjectImport(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectUpload(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) - uploader := s3manager.NewUploaderWithClient(conn) + conn := meta.(*conns.AWSClient).S3Client(ctx) + uploader := manager.NewUploader(conn) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) @@ -437,33 +441,30 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte body = bytes.NewReader([]byte{}) } - bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - - input := &s3manager.UploadInput{ + input := &s3.PutObjectInput{ Body: body, - Bucket: aws.String(bucket), - Key: aws.String(key), + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(sdkv1CompatibleCleanKey(d.Get("key").(string))), } if v, ok := d.GetOk("acl"); ok { - input.ACL = aws.String(v.(string)) + input.ACL = types.ObjectCannedACL(v.(string)) } - if v, ok := d.GetOk("storage_class"); ok { - input.StorageClass = aws.String(v.(string)) + if v, ok := d.GetOk("bucket_key_enabled"); ok { + input.BucketKeyEnabled = v.(bool) } if v, ok := d.GetOk("cache_control"); ok { input.CacheControl = aws.String(v.(string)) } - if v, ok := d.GetOk("content_type"); ok { - input.ContentType = aws.String(v.(string)) + if v, ok := d.GetOk("checksum_algorithm"); ok { + input.ChecksumAlgorithm = types.ChecksumAlgorithm(v.(string)) } - if v, ok := d.GetOk("metadata"); ok { - input.Metadata = flex.ExpandStringMap(v.(map[string]interface{})) + if v, ok := d.GetOk("content_disposition"); ok { + input.ContentDisposition = aws.String(v.(string)) } if v, ok := d.GetOk("content_encoding"); ok { @@ -474,21 +475,37 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte input.ContentLanguage = aws.String(v.(string)) } - if v, ok := d.GetOk("content_disposition"); ok { - input.ContentDisposition = aws.String(v.(string)) + if v, 
ok := d.GetOk("content_type"); ok { + input.ContentType = aws.String(v.(string)) } - if v, ok := d.GetOk("bucket_key_enabled"); ok { - input.BucketKeyEnabled = aws.Bool(v.(bool)) + if v, ok := d.GetOk("kms_key_id"); ok { + input.SSEKMSKeyId = aws.String(v.(string)) + input.ServerSideEncryption = types.ServerSideEncryptionAwsKms + } + + if v, ok := d.GetOk("metadata"); ok { + input.Metadata = flex.ExpandStringValueMap(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("object_lock_legal_hold_status"); ok { + input.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(v.(string)) + } + + if v, ok := d.GetOk("object_lock_mode"); ok { + input.ObjectLockMode = types.ObjectLockMode(v.(string)) + } + + if v, ok := d.GetOk("object_lock_retain_until_date"); ok { + input.ObjectLockRetainUntilDate = expandObjectDate(v.(string)) } if v, ok := d.GetOk("server_side_encryption"); ok { - input.ServerSideEncryption = aws.String(v.(string)) + input.ServerSideEncryption = types.ServerSideEncryption(v.(string)) } - if v, ok := d.GetOk("kms_key_id"); ok { - input.SSEKMSKeyId = aws.String(v.(string)) - input.ServerSideEncryption = aws.String(s3.ServerSideEncryptionAwsKms) + if v, ok := d.GetOk("storage_class"); ok { + input.StorageClass = types.StorageClass(v.(string)) } if len(tags) > 0 { @@ -500,24 +517,20 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte input.WebsiteRedirectLocation = aws.String(v.(string)) } - if v, ok := d.GetOk("object_lock_legal_hold_status"); ok { - input.ObjectLockLegalHoldStatus = aws.String(v.(string)) + if (input.ObjectLockLegalHoldStatus != "" || input.ObjectLockMode != "" || input.ObjectLockRetainUntilDate != nil) && input.ChecksumAlgorithm == "" { + // "Content-MD5 OR x-amz-checksum- HTTP header is required for Put Object requests with Object Lock parameters". + // AWS SDK for Go v1 transparently added a Content-MD5 header. 
+ input.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32 } - if v, ok := d.GetOk("object_lock_mode"); ok { - input.ObjectLockMode = aws.String(v.(string)) - } - - if v, ok := d.GetOk("object_lock_retain_until_date"); ok { - input.ObjectLockRetainUntilDate = expandObjectDate(v.(string)) + if _, err := uploader.Upload(ctx, input); err != nil { + return sdkdiag.AppendErrorf(diags, "uploading S3 Object (%s) to Bucket (%s): %s", aws.ToString(input.Key), aws.ToString(input.Bucket), err) } - if _, err := uploader.Upload(input); err != nil { - return sdkdiag.AppendErrorf(diags, "uploading object to S3 bucket (%s): %s", bucket, err) + if d.IsNewResource() { + d.SetId(d.Get("key").(string)) } - d.SetId(key) - return append(diags, resourceObjectRead(ctx, d, meta)...) } @@ -531,8 +544,8 @@ func resourceObjectSetKMS(ctx context.Context, d *schema.ResourceData, meta inte return fmt.Errorf("Failed to describe default S3 KMS key (%s): %s", DefaultKMSKeyAlias, err) } - if aws.StringValue(sseKMSKeyId) != aws.StringValue(keyMetadata.Arn) { - log.Printf("[DEBUG] S3 object is encrypted using a non-default KMS Key ID: %s", aws.StringValue(sseKMSKeyId)) + if kmsKeyID := aws.ToString(sseKMSKeyId); kmsKeyID != aws.ToString(keyMetadata.Arn) { + log.Printf("[DEBUG] S3 object is encrypted using a non-default KMS Key ID: %s", kmsKeyID) d.Set("kms_key_id", sseKMSKeyId) } } @@ -569,6 +582,7 @@ func hasObjectContentChanges(d verify.ResourceDiffer) bool { for _, key := range []string{ "bucket_key_enabled", "cache_control", + "checksum_algorithm", "content_base64", "content_disposition", "content_encoding", @@ -591,11 +605,47 @@ func hasObjectContentChanges(d verify.ResourceDiffer) bool { return false } -// DeleteAllObjectVersions deletes all versions of a specified key from an S3 bucket. 
+func findObjectByBucketAndKey(ctx context.Context, conn *s3.Client, bucket, key, etag, checksumAlgorithm string) (*s3.HeadObjectOutput, error) { + input := &s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + if checksumAlgorithm != "" { + input.ChecksumMode = types.ChecksumModeEnabled + } + if etag != "" { + input.IfMatch = aws.String(etag) + } + + return findObject(ctx, conn, input) +} + +func findObject(ctx context.Context, conn *s3.Client, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { + output, err := conn.HeadObject(ctx, input) + + if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +// deleteAllObjectVersions deletes all versions of a specified key from an S3 bucket. // If key is empty then all versions of all objects are deleted. // Set force to true to override any S3 object lock protections on object lock enabled buckets. // Returns the number of objects deleted. 
-func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) (int64, error) { +func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucketName, key string, force, ignoreObjectErrors bool) (int64, error) { var nObjects int64 input := &s3.ListObjectVersionsInput{ @@ -606,14 +656,22 @@ func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key s } var lastErr error - err := conn.ListObjectVersionsPagesWithContext(ctx, input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + + pages := s3.NewListObjectVersionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + break + } + + if err != nil { + return nObjects, err } for _, objectVersion := range page.Versions { - objectKey := aws.StringValue(objectVersion.Key) - objectVersionID := aws.StringValue(objectVersion.VersionId) + objectKey := aws.ToString(objectVersion.Key) + objectVersionID := aws.ToString(objectVersion.VersionId) if key != "" && key != objectKey { continue @@ -625,32 +683,36 @@ func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key s nObjects++ } - if tfawserr.ErrCodeEquals(err, "AccessDenied") && force { + if tfawserr.ErrCodeEquals(err, errCodeAccessDenied) && force { // Remove any legal hold. 
- resp, err := conn.HeadObjectWithContext(ctx, &s3.HeadObjectInput{ + input := &s3.HeadObjectInput{ Bucket: aws.String(bucketName), - Key: objectVersion.Key, - VersionId: objectVersion.VersionId, - }) + Key: aws.String(objectKey), + VersionId: aws.String(objectVersionID), + } + + output, err := conn.HeadObject(ctx, input) if err != nil { - log.Printf("[ERROR] Error getting S3 Bucket (%s) Object (%s) Version (%s) metadata: %s", bucketName, objectKey, objectVersionID, err) + log.Printf("[ERROR] Getting S3 Bucket (%s) Object (%s) Version (%s) metadata: %s", bucketName, objectKey, objectVersionID, err) lastErr = err continue } - if aws.StringValue(resp.ObjectLockLegalHoldStatus) == s3.ObjectLockLegalHoldStatusOn { - _, err := conn.PutObjectLegalHoldWithContext(ctx, &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: objectVersion.Key, - VersionId: objectVersion.VersionId, - LegalHold: &s3.ObjectLockLegalHold{ - Status: aws.String(s3.ObjectLockLegalHoldStatusOff), + if output.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn { + input := &s3.PutObjectLegalHoldInput{ + Bucket: aws.String(bucketName), + Key: aws.String(objectKey), + LegalHold: &types.ObjectLockLegalHold{ + Status: types.ObjectLockLegalHoldStatusOff, }, - }) + VersionId: aws.String(objectVersionID), + } + + _, err := conn.PutObjectLegalHold(ctx, input) if err != nil { - log.Printf("[ERROR] Error putting S3 Bucket (%s) Object (%s) Version(%s) legal hold: %s", bucketName, objectKey, objectVersionID, err) + log.Printf("[ERROR] Putting S3 Bucket (%s) Object (%s) Version(%s) legal hold: %s", bucketName, objectKey, objectVersionID, err) lastErr = err continue } @@ -668,7 +730,7 @@ func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key s } // AccessDenied for another reason. 
- lastErr = fmt.Errorf("AccessDenied deleting S3 Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, objectVersionID) + lastErr = fmt.Errorf("deleting S3 Bucket (%s) Object (%s) Version (%s): %w", bucketName, objectKey, objectVersionID, err) continue } @@ -676,34 +738,31 @@ func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key s lastErr = err } } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - err = nil - } - - if err != nil { - return nObjects, err } if lastErr != nil { if !ignoreObjectErrors { - return nObjects, fmt.Errorf("deleting at least one object version, last error: %s", lastErr) + return nObjects, fmt.Errorf("deleting at least one S3 Object version, last error: %w", lastErr) } lastErr = nil } - err = conn.ListObjectVersionsPagesWithContext(ctx, input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages = s3.NewListObjectVersionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + break + } + + if err != nil { + return nObjects, err } for _, deleteMarker := range page.DeleteMarkers { - deleteMarkerKey := aws.StringValue(deleteMarker.Key) - deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) + deleteMarkerKey := aws.ToString(deleteMarker.Key) + deleteMarkerVersionID := aws.ToString(deleteMarker.VersionId) if key != "" && key != deleteMarkerKey { continue @@ -718,24 +777,12 @@ func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key s nObjects++ } } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - err = nil - } - - if err != nil { - return nObjects, err } if lastErr != nil { if !ignoreObjectErrors { - return nObjects, fmt.Errorf("deleting at least one object delete marker, last error: %s", lastErr) + return nObjects, fmt.Errorf("deleting at least one S3 Object 
delete marker, last error: %w", lastErr) } - - lastErr = nil } return nObjects, nil @@ -743,7 +790,7 @@ func DeleteAllObjectVersions(ctx context.Context, conn *s3.S3, bucketName, key s // deleteObjectVersion deletes a specific object version. // Set force to true to override any S3 object lock protections. -func deleteObjectVersion(ctx context.Context, conn *s3.S3, b, k, v string, force bool) error { +func deleteObjectVersion(ctx context.Context, conn *s3.Client, b, k, v string, force bool) error { input := &s3.DeleteObjectInput{ Bucket: aws.String(b), Key: aws.String(k), @@ -752,19 +799,18 @@ func deleteObjectVersion(ctx context.Context, conn *s3.S3, b, k, v string, force if v != "" { input.VersionId = aws.String(v) } - if force { - input.BypassGovernanceRetention = aws.Bool(true) + input.BypassGovernanceRetention = true } - log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", b, k, v) - _, err := conn.DeleteObjectWithContext(ctx, input) + log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version (%s)", b, k, v) + _, err := conn.DeleteObject(ctx, input) if err != nil { - log.Printf("[WARN] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) + log.Printf("[WARN] Deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) } - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchKey) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchKey) { return nil } @@ -787,3 +833,15 @@ func flattenObjectDate(t *time.Time) string { return t.Format(time.RFC3339) } + +// sdkv1CompatibleCleanKey returns an AWS SDK for Go v1 compatible clean key. +// DisableRestProtocolURICleaning was false on the standard S3Conn, so to ensure backwards +// compatibility we must "clean" the configured key before passing to AWS SDK for Go v2 APIs. +// See https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#hdr-Automatic_URI_cleaning. 
+// See https://github.com/aws/aws-sdk-go/blob/cf903c8c543034654bb8f53b5f9d6454fdb2117f/private/protocol/rest/build.go#L247-L258. +func sdkv1CompatibleCleanKey(key string) string { + // We are effectively ignoring all leading '/'s and treating multiple '/'s as a single '/'. + key = strings.TrimLeft(key, "/") + key = regexache.MustCompile(`/+`).ReplaceAllString(key, "/") + return key +} diff --git a/internal/service/s3/object_copy.go b/internal/service/s3/object_copy.go index 9628dd10186..8cc595685b9 100644 --- a/internal/service/s3/object_copy.go +++ b/internal/service/s3/object_copy.go @@ -8,17 +8,13 @@ import ( "context" "fmt" "log" - "net/http" "net/url" "strings" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -65,6 +61,27 @@ func ResourceObjectCopy() *schema.Resource { Optional: true, Computed: true, }, + "checksum_algorithm": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ChecksumAlgorithm](), + }, + "checksum_crc32": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_crc32c": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_sha1": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_sha256": { + Type: schema.TypeString, + Computed: true, + }, "content_disposition": { Type: schema.TypeString, Optional: true, @@ -315,8 +332,8 @@ func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta in conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - 
output, err := FindObjectByThreePartKey(ctx, conn, bucket, key, "") + key := sdkv1CompatibleCleanKey(d.Get("key").(string)) + output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string)) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Object (%s) not found, removing from state", d.Id()) @@ -330,6 +347,10 @@ func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("bucket_key_enabled", output.BucketKeyEnabled) d.Set("cache_control", output.CacheControl) + d.Set("checksum_crc32", output.ChecksumCRC32) + d.Set("checksum_crc32c", output.ChecksumCRC32C) + d.Set("checksum_sha1", output.ChecksumSHA1) + d.Set("checksum_sha256", output.ChecksumSHA256) d.Set("content_disposition", output.ContentDisposition) d.Set("content_encoding", output.ContentEncoding) d.Set("content_language", output.ContentLanguage) @@ -390,6 +411,7 @@ func resourceObjectCopyUpdate(ctx context.Context, d *schema.ResourceData, meta "bucket", "bucket_key_enabled", "cache_control", + "checksum_algorithm", "content_disposition", "content_encoding", "content_language", @@ -430,18 +452,14 @@ func resourceObjectCopyUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceObjectCopyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - // We are effectively ignoring all leading '/'s in the key name and - // treating multiple '/'s as a single '/' as aws.Config.DisableRestProtocolURICleaning is false - key = strings.TrimLeft(key, "/") - key = regexache.MustCompile(`/+`).ReplaceAllString(key, "/") + key := sdkv1CompatibleCleanKey(d.Get("key").(string)) var err error if _, ok := d.GetOk("version_id"); ok { - _, err = DeleteAllObjectVersions(ctx, conn, bucket, key, 
d.Get("force_destroy").(bool), false) + _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) } else { err = deleteObjectVersion(ctx, conn, bucket, key, "", false) } @@ -461,7 +479,7 @@ func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta input := &s3.CopyObjectInput{ Bucket: aws.String(d.Get("bucket").(string)), CopySource: aws.String(url.QueryEscape(d.Get("source").(string))), - Key: aws.String(d.Get("key").(string)), + Key: aws.String(sdkv1CompatibleCleanKey(d.Get("key").(string))), } if v, ok := d.GetOk("acl"); ok { @@ -476,6 +494,10 @@ func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta input.CacheControl = aws.String(v.(string)) } + if v, ok := d.GetOk("checksum_algorithm"); ok { + input.ChecksumAlgorithm = types.ChecksumAlgorithm(v.(string)) + } + if v, ok := d.GetOk("content_disposition"); ok { input.ContentDisposition = aws.String(v.(string)) } @@ -610,7 +632,7 @@ func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta output, err := conn.CopyObject(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "copying S3 object (bucket: %s; key: %s; source: %s): %s", aws.ToString(input.Bucket), aws.ToString(input.Key), aws.ToString(input.CopySource), err) + return sdkdiag.AppendErrorf(diags, "copying %s to S3 Bucket (%s) Object (%s): %s", aws.ToString(input.CopySource), aws.ToString(input.Bucket), aws.ToString(input.Key), err) } if d.IsNewResource() { @@ -625,35 +647,6 @@ func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta return append(diags, resourceObjectCopyRead(ctx, d, meta)...) 
} -func FindObjectByThreePartKey(ctx context.Context, conn *s3.Client, bucket, key, etag string) (*s3.HeadObjectOutput, error) { - input := &s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - if etag != "" { - input.IfMatch = aws.String(etag) - } - - output, err := conn.HeadObject(ctx, input) - - if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - type s3Grants struct { FullControl *string Read *string diff --git a/internal/service/s3/object_copy_test.go b/internal/service/s3/object_copy_test.go index 1506fc95338..def0d35abb6 100644 --- a/internal/service/s3/object_copy_test.go +++ b/internal/service/s3/object_copy_test.go @@ -41,10 +41,15 @@ func TestAccS3ObjectCopy_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "bucket", rName2), resource.TestCheckResourceAttr(resourceName, "bucket_key_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "cache_control", ""), + resource.TestCheckNoResourceAttr(resourceName, "checksum_algorithm"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), resource.TestCheckResourceAttr(resourceName, "content_disposition", ""), resource.TestCheckResourceAttr(resourceName, "content_encoding", ""), resource.TestCheckResourceAttr(resourceName, "content_language", ""), - resource.TestCheckResourceAttr(resourceName, "content_type", "binary/octet-stream"), + resource.TestCheckResourceAttr(resourceName, "content_type", "application/octet-stream"), resource.TestCheckNoResourceAttr(resourceName, "copy_if_match"), 
resource.TestCheckNoResourceAttr(resourceName, "copy_if_modified_since"), resource.TestCheckNoResourceAttr(resourceName, "copy_if_none_match"), @@ -297,6 +302,139 @@ func TestAccS3ObjectCopy_sourceWithSlashes(t *testing.T) { }) } +func TestAccS3ObjectCopy_checksumAlgorithm(t *testing.T) { + ctx := acctest.Context(t) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object_copy.test" + sourceKey := "source" + targetKey := "target" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccObjectCopyConfig_checksumAlgorithm(rName1, sourceKey, rName2, targetKey, "CRC32C"), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectCopyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "checksum_algorithm", "CRC32C"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", "7y1BJA=="), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), + ), + }, + { + Config: testAccObjectCopyConfig_checksumAlgorithm(rName1, sourceKey, rName2, targetKey, "SHA1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectCopyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "checksum_algorithm", "SHA1"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", "7MuLDoLjuZB9Uv63Krr4E7U5x30="), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), + ), + 
}, + }, + }) +} + +func TestAccS3ObjectCopy_objectLockLegalHold(t *testing.T) { + ctx := acctest.Context(t) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object_copy.test" + sourceKey := "source" + targetKey := "target" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccObjectCopyConfig_lockLegalHold(rName1, sourceKey, rName2, targetKey, "ON"), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectCopyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + ), + }, + { + Config: testAccObjectCopyConfig_lockLegalHold(rName1, sourceKey, rName2, targetKey, "OFF"), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectCopyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + ), + }, + }, + }) +} + +func TestAccS3ObjectCopy_targetWithMultipleSlashes(t *testing.T) { + ctx := acctest.Context(t) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object_copy.test" + sourceKey := "source" + targetKey := "/dir//target/" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccObjectCopyConfig_basic(rName1, sourceKey, rName2, targetKey), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr(resourceName, "key", targetKey), + resource.TestCheckResourceAttr(resourceName, "source", fmt.Sprintf("%s/%s", rName1, sourceKey)), + ), + }, + }, + }) +} + +func TestAccS3ObjectCopy_targetWithMultipleSlashesMigrated(t *testing.T) { + ctx := acctest.Context(t) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object_copy.test" + sourceKey := "source" + targetKey := "/dir//target/" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + CheckDestroy: testAccCheckObjectCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + // Final version for aws_s3_object_copy using AWS SDK for Go v1. + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.15.0", + }, + }, + Config: testAccObjectCopyConfig_basic(rName1, sourceKey, rName2, targetKey), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "key", targetKey), + resource.TestCheckResourceAttr(resourceName, "source", fmt.Sprintf("%s/%s", rName1, sourceKey)), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccObjectCopyConfig_basic(rName1, sourceKey, rName2, targetKey), + PlanOnly: true, + }, + }, + }) +} + func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -306,7 +444,7 @@ func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := tfs3.FindObjectByThreePartKey(ctx, conn, rs.Primary.Attributes["bucket"], rs.Primary.Attributes["key"], rs.Primary.Attributes["etag"]) + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], 
tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "") if tfresource.NotFound(err) { continue @@ -332,7 +470,7 @@ func testAccCheckObjectCopyExists(ctx context.Context, n string) resource.TestCh conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - _, err := tfs3.FindObjectByThreePartKey(ctx, conn, rs.Primary.Attributes["bucket"], rs.Primary.Attributes["key"], rs.Primary.Attributes["etag"]) + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "") return err } @@ -348,8 +486,6 @@ resource "aws_s3_bucket" "source" { resource "aws_s3_bucket" "target" { bucket = %[2]q - - force_destroy = true } `, sourceBucket, targetBucket) } @@ -515,3 +651,56 @@ resource "aws_s3_object_copy" "test" { } `, sourceKey, targetKey)) } + +func testAccObjectCopyConfig_checksumAlgorithm(sourceBucket, sourceKey, targetBucket, targetKey, checksumAlgorithm string) string { + return acctest.ConfigCompose(testAccObjectCopyConfig_baseSourceObject(sourceBucket, sourceKey, targetBucket), fmt.Sprintf(` +resource "aws_s3_object_copy" "test" { + bucket = aws_s3_bucket.target.bucket + key = %[1]q + source = "${aws_s3_bucket.source.bucket}/${aws_s3_object.source.key}" + + checksum_algorithm = %[2]q +} +`, targetKey, checksumAlgorithm)) +} + +func testAccObjectCopyConfig_lockLegalHold(sourceBucket, sourceKey, targetBucket, targetKey, legalHoldStatus string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "source" { + bucket = %[1]q + + force_destroy = true +} + +resource "aws_s3_bucket" "target" { + bucket = %[3]q + + object_lock_enabled = true + + force_destroy = true +} + +resource "aws_s3_bucket_versioning" "target" { + bucket = aws_s3_bucket.target.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_object" "source" { + bucket = aws_s3_bucket.source.bucket + key = %[2]q + content = 
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" +} + +resource "aws_s3_object_copy" "test" { + # Must have bucket versioning enabled first + bucket = aws_s3_bucket_versioning.target.bucket + key = %[4]q + source = "${aws_s3_bucket.source.bucket}/${aws_s3_object.source.key}" + + object_lock_legal_hold_status = %[5]q + force_destroy = true +} +`, sourceBucket, sourceKey, targetBucket, targetKey, legalHoldStatus) +} diff --git a/internal/service/s3/object_data_source.go b/internal/service/s3/object_data_source.go index 75c374f7271..44068b8d1c3 100644 --- a/internal/service/s3/object_data_source.go +++ b/internal/service/s3/object_data_source.go @@ -4,22 +4,21 @@ package s3 import ( - "bytes" "context" - "fmt" - "log" "regexp" "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) @@ -45,6 +44,27 @@ func DataSourceObject() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "checksum_mode": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ChecksumMode](), + }, + "checksum_crc32": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_crc32c": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_sha1": { + Type: schema.TypeString, + Computed: true, + }, + "checksum_sha256": { + Type: schema.TypeString, + 
Computed: true, + }, "content_disposition": { Type: schema.TypeString, Computed: true, @@ -118,6 +138,7 @@ func DataSourceObject() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "tags": tftags.TagsSchemaComputed(), "version_id": { Type: schema.TypeString, Optional: true, @@ -127,24 +148,24 @@ func DataSourceObject() *schema.Resource { Type: schema.TypeString, Computed: true, }, - - "tags": tftags.TagsSchemaComputed(), }, } } func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - - input := s3.HeadObjectInput{ + key := sdkv1CompatibleCleanKey(d.Get("key").(string)) + input := &s3.HeadObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), } + if v, ok := d.GetOk("checksum_mode"); ok { + input.ChecksumMode = types.ChecksumMode(v.(string)) + } if v, ok := d.GetOk("range"); ok { input.Range = aws.String(v.(string)) } @@ -152,94 +173,83 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte input.VersionId = aws.String(v.(string)) } - versionText := "" - uniqueId := bucket + "/" + key - if v, ok := d.GetOk("version_id"); ok { - versionText = fmt.Sprintf(" of version %q", v.(string)) - uniqueId += "@" + v.(string) - } + output, err := findObject(ctx, conn, input) - log.Printf("[DEBUG] Reading S3 Object: %s", input) - out, err := conn.HeadObjectWithContext(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket (%s) Object (%s): %s", bucket, key, err) - } - if aws.BoolValue(out.DeleteMarker) { - return sdkdiag.AppendErrorf(diags, "Requested S3 object %q%s has been deleted", bucket+key, versionText) + return sdkdiag.AppendErrorf(diags, "reading S3 Bucket (%s) Object (%s): %s", bucket, key, err) 
} - log.Printf("[DEBUG] Received S3 object: %s", out) + if output.DeleteMarker { + return sdkdiag.AppendErrorf(diags, "S3 Bucket (%s) Object (%s) has been deleted", bucket, key) + } - d.SetId(uniqueId) + id := bucket + "/" + d.Get("key").(string) + if v, ok := d.GetOk("version_id"); ok { + id += "@" + v.(string) + } + d.SetId(id) - d.Set("bucket_key_enabled", out.BucketKeyEnabled) - d.Set("cache_control", out.CacheControl) - d.Set("content_disposition", out.ContentDisposition) - d.Set("content_encoding", out.ContentEncoding) - d.Set("content_language", out.ContentLanguage) - d.Set("content_length", out.ContentLength) - d.Set("content_type", out.ContentType) + d.Set("bucket_key_enabled", output.BucketKeyEnabled) + d.Set("cache_control", output.CacheControl) + d.Set("checksum_crc32", output.ChecksumCRC32) + d.Set("checksum_crc32c", output.ChecksumCRC32C) + d.Set("checksum_sha1", output.ChecksumSHA1) + d.Set("checksum_sha256", output.ChecksumSHA256) + d.Set("content_disposition", output.ContentDisposition) + d.Set("content_encoding", output.ContentEncoding) + d.Set("content_language", output.ContentLanguage) + d.Set("content_length", output.ContentLength) + d.Set("content_type", output.ContentType) // See https://forums.aws.amazon.com/thread.jspa?threadID=44003 - d.Set("etag", strings.Trim(aws.StringValue(out.ETag), `"`)) - d.Set("expiration", out.Expiration) - d.Set("expires", out.Expires) - if out.LastModified != nil { - d.Set("last_modified", out.LastModified.Format(time.RFC1123)) + d.Set("etag", strings.Trim(aws.ToString(output.ETag), `"`)) + d.Set("expiration", output.Expiration) + if output.Expires != nil { + d.Set("expires", output.Expires.Format(time.RFC1123)) } else { - d.Set("last_modified", "") + d.Set("expires", nil) } - d.Set("metadata", flex.PointersMapToStringList(out.Metadata)) - d.Set("object_lock_legal_hold_status", out.ObjectLockLegalHoldStatus) - d.Set("object_lock_mode", out.ObjectLockMode) - d.Set("object_lock_retain_until_date", 
flattenObjectDate(out.ObjectLockRetainUntilDate)) - d.Set("server_side_encryption", out.ServerSideEncryption) - d.Set("sse_kms_key_id", out.SSEKMSKeyId) - d.Set("version_id", out.VersionId) - d.Set("website_redirect_location", out.WebsiteRedirectLocation) - + if output.LastModified != nil { + d.Set("last_modified", output.LastModified.Format(time.RFC1123)) + } else { + d.Set("last_modified", nil) + } + d.Set("metadata", output.Metadata) + d.Set("object_lock_legal_hold_status", output.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", output.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenObjectDate(output.ObjectLockRetainUntilDate)) + d.Set("server_side_encryption", output.ServerSideEncryption) + d.Set("sse_kms_key_id", output.SSEKMSKeyId) // The "STANDARD" (which is also the default) storage // class when set would not be included in the results. - if out.StorageClass == nil { - d.Set("storage_class", s3.StorageClassStandard) - } else { - d.Set("storage_class", out.StorageClass) + d.Set("storage_class", types.ObjectStorageClassStandard) + if output.StorageClass != "" { + d.Set("storage_class", output.StorageClass) } + d.Set("version_id", output.VersionId) + d.Set("website_redirect_location", output.WebsiteRedirectLocation) - if isContentTypeAllowed(out.ContentType) { - input := s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), + if isContentTypeAllowed(output.ContentType) { + downloader := manager.NewDownloader(conn) + buf := manager.NewWriteAtBuffer(make([]byte, 0)) + input := &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + VersionId: output.VersionId, } if v, ok := d.GetOk("range"); ok { input.Range = aws.String(v.(string)) } - if out.VersionId != nil { - input.VersionId = out.VersionId - } - out, err := conn.GetObjectWithContext(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "Failed getting S3 object: %s", err) - } - buf := new(bytes.Buffer) - bytesRead, err := 
buf.ReadFrom(out.Body) + _, err := downloader.Download(ctx, buf, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "Failed reading content of S3 object (%s): %s", uniqueId, err) - } - log.Printf("[INFO] Saving %d bytes from S3 object %s", bytesRead, uniqueId) - d.Set("body", buf.String()) - } else { - contentType := "" - if out.ContentType == nil { - contentType = "" - } else { - contentType = aws.StringValue(out.ContentType) + return sdkdiag.AppendErrorf(diags, "downloading S3 Bucket (%s) Object (%s): %s", bucket, key, err) } - log.Printf("[INFO] Ignoring body of S3 object %s with Content-Type %q", uniqueId, contentType) + d.Set("body", string(buf.Bytes())) } - tags, err := ObjectListTagsV1(ctx, conn, bucket, key) + tags, err := ObjectListTags(ctx, conn, bucket, key) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) @@ -252,9 +262,8 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte return diags } -// This is to prevent potential issues w/ binary files -// and generally unprintable characters -// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738 +// This is to prevent potential issues w/ binary files and generally unprintable characters. +// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738. 
func isContentTypeAllowed(contentType *string) bool { if contentType == nil { return false @@ -271,9 +280,8 @@ func isContentTypeAllowed(contentType *string) bool { regexache.MustCompile(`^application/xml$`), regexache.MustCompile(`^text/.+`), } - for _, r := range allowedContentTypes { - if r.MatchString(*contentType) { + if r.MatchString(aws.ToString(contentType)) { return true } } diff --git a/internal/service/s3/object_data_source_test.go b/internal/service/s3/object_data_source_test.go index 4b1266739c9..53c5c87958b 100644 --- a/internal/service/s3/object_data_source_test.go +++ b/internal/service/s3/object_data_source_test.go @@ -4,44 +4,40 @@ package s3_test import ( - "context" "fmt" "testing" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" ) const rfc1123RegexPattern = `^[A-Za-z]{3}, [0-9]+ [A-Za-z]+ [0-9]{4} [0-9:]+ [A-Z]+$` func TestAccS3ObjectDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, 
Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), + Config: testAccObjectDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckNoResourceAttr(dataSourceName, "body"), + resource.TestCheckNoResourceAttr(dataSourceName, "checksum_mode"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -49,7 +45,6 @@ func TestAccS3ObjectDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), - resource.TestCheckNoResourceAttr(dataSourceName, "body"), ), }, }, @@ -58,24 +53,19 @@ func TestAccS3ObjectDataSource_basic(t *testing.T) { func TestAccS3ObjectDataSource_basicViaAccessPoint(t *testing.T) { ctx := acctest.Context(t) - var dsObj, rObj s3.GetObjectOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - dataSourceName := "data.aws_s3_object.test" resourceName := "aws_s3_object.test" accessPointResourceName := "aws_s3_access_point.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { Config: testAccObjectDataSourceConfig_basicViaAccessPoint(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), - testAccCheckObjectExists(ctx, resourceName, &rObj), resource.TestCheckResourceAttrPair(dataSourceName, "bucket", accessPointResourceName, "arn"), resource.TestCheckResourceAttrPair(dataSourceName, "key", resourceName, "key"), ), @@ -86,25 +76,20 @@ func TestAccS3ObjectDataSource_basicViaAccessPoint(t *testing.T) { func TestAccS3ObjectDataSource_readableBody(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_readableBody(rInt), + Config: testAccObjectDataSourceConfig_readableBody(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), + resource.TestCheckResourceAttr(dataSourceName, "body", "yes"), resource.TestCheckResourceAttr(dataSourceName, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", 
resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -112,7 +97,6 @@ func TestAccS3ObjectDataSource_readableBody(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), - resource.TestCheckResourceAttr(dataSourceName, "body", "yes"), ), }, }, @@ -121,35 +105,29 @@ func TestAccS3ObjectDataSource_readableBody(t *testing.T) { func TestAccS3ObjectDataSource_kmsEncrypted(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_kmsEncrypted(rInt), + Config: testAccObjectDataSourceConfig_kmsEncrypted(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), + resource.TestCheckResourceAttr(dataSourceName, "body", "Keep Calm and Carry On"), resource.TestCheckResourceAttr(dataSourceName, "content_length", "22"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), 
resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), - resource.TestCheckResourceAttrPair(dataSourceName, "server_side_encryption", resourceName, "server_side_encryption"), - resource.TestCheckResourceAttrPair(dataSourceName, "sse_kms_key_id", resourceName, "kms_key_id"), resource.TestMatchResourceAttr(dataSourceName, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), - resource.TestCheckResourceAttr(dataSourceName, "body", "Keep Calm and Carry On"), + resource.TestCheckResourceAttrPair(dataSourceName, "server_side_encryption", resourceName, "server_side_encryption"), + resource.TestCheckResourceAttrPair(dataSourceName, "sse_kms_key_id", resourceName, "kms_key_id"), ), }, }, @@ -158,36 +136,30 @@ func TestAccS3ObjectDataSource_kmsEncrypted(t *testing.T) { func TestAccS3ObjectDataSource_bucketKeyEnabled(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_bucketKeyEnabled(rInt), + Config: 
testAccObjectDataSourceConfig_bucketKeyEnabled(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), + resource.TestCheckResourceAttr(dataSourceName, "body", "Keep Calm and Carry On"), + resource.TestCheckResourceAttrPair(dataSourceName, "bucket_key_enabled", resourceName, "bucket_key_enabled"), resource.TestCheckResourceAttr(dataSourceName, "content_length", "22"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), - resource.TestCheckResourceAttrPair(dataSourceName, "server_side_encryption", resourceName, "server_side_encryption"), - resource.TestCheckResourceAttrPair(dataSourceName, "sse_kms_key_id", resourceName, "kms_key_id"), - resource.TestCheckResourceAttrPair(dataSourceName, "bucket_key_enabled", resourceName, "bucket_key_enabled"), resource.TestMatchResourceAttr(dataSourceName, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), - resource.TestCheckResourceAttr(dataSourceName, "body", "Keep Calm and Carry On"), + resource.TestCheckResourceAttrPair(dataSourceName, "server_side_encryption", resourceName, "server_side_encryption"), + resource.TestCheckResourceAttrPair(dataSourceName, "sse_kms_key_id", resourceName, "kms_key_id"), ), }, }, @@ -196,50 +168,44 @@ func TestAccS3ObjectDataSource_bucketKeyEnabled(t *testing.T) { func TestAccS3ObjectDataSource_allParams(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() - - var rObj 
s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_allParams(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), - resource.TestCheckResourceAttr(dataSourceName, "content_length", "25"), - resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), - resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), - resource.TestMatchResourceAttr(dataSourceName, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), - resource.TestCheckResourceAttrPair(dataSourceName, "version_id", resourceName, "version_id"), + Config: testAccObjectDataSourceConfig_allParams(rName), + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckNoResourceAttr(dataSourceName, "body"), resource.TestCheckResourceAttrPair(dataSourceName, "bucket_key_enabled", resourceName, "bucket_key_enabled"), resource.TestCheckResourceAttrPair(dataSourceName, "cache_control", resourceName, "cache_control"), resource.TestCheckResourceAttrPair(dataSourceName, "content_disposition", resourceName, "content_disposition"), resource.TestCheckResourceAttrPair(dataSourceName, "content_encoding", resourceName, "content_encoding"), resource.TestCheckResourceAttrPair(dataSourceName, "content_language", resourceName, "content_language"), - // 
Encryption is off - resource.TestCheckResourceAttrPair(dataSourceName, "server_side_encryption", resourceName, "server_side_encryption"), - resource.TestCheckResourceAttr(dataSourceName, "sse_kms_key_id", ""), - // Supported, but difficult to reproduce in short testing time - resource.TestCheckResourceAttrPair(dataSourceName, "storage_class", resourceName, "storage_class"), + resource.TestCheckResourceAttr(dataSourceName, "content_length", "25"), + resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), + resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), resource.TestCheckResourceAttr(dataSourceName, "expiration", ""), // Currently unsupported in aws_s3_object resource resource.TestCheckResourceAttr(dataSourceName, "expires", ""), - resource.TestCheckResourceAttrPair(dataSourceName, "website_redirect_location", resourceName, "website_redirect"), + resource.TestMatchResourceAttr(dataSourceName, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), resource.TestCheckResourceAttr(dataSourceName, "metadata.%", "0"), - resource.TestCheckResourceAttr(dataSourceName, "tags.%", "1"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), + // Encryption is off + resource.TestCheckResourceAttrPair(dataSourceName, "server_side_encryption", resourceName, "server_side_encryption"), + resource.TestCheckResourceAttr(dataSourceName, "sse_kms_key_id", ""), + // Supported, but difficult to reproduce in short testing time + resource.TestCheckResourceAttrPair(dataSourceName, "storage_class", resourceName, "storage_class"), + resource.TestCheckResourceAttr(dataSourceName, "tags.%", "1"), + 
resource.TestCheckResourceAttrPair(dataSourceName, "version_id", resourceName, "version_id"), + resource.TestCheckResourceAttrPair(dataSourceName, "website_redirect_location", resourceName, "website_redirect"), ), }, }, @@ -248,25 +214,20 @@ func TestAccS3ObjectDataSource_allParams(t *testing.T) { func TestAccS3ObjectDataSource_objectLockLegalHoldOff(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_lockLegalHoldOff(rInt), + Config: testAccObjectDataSourceConfig_lockLegalHoldOff(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), + resource.TestCheckNoResourceAttr(dataSourceName, "body"), resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -274,7 +235,6 @@ func TestAccS3ObjectDataSource_objectLockLegalHoldOff(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), 
resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), - resource.TestCheckNoResourceAttr(dataSourceName, "body"), ), }, }, @@ -283,26 +243,21 @@ func TestAccS3ObjectDataSource_objectLockLegalHoldOff(t *testing.T) { func TestAccS3ObjectDataSource_objectLockLegalHoldOn(t *testing.T) { ctx := acctest.Context(t) - rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName := "data.aws_s3_object.obj" + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_lockLegalHoldOn(rInt, retainUntilDate), + Config: testAccObjectDataSourceConfig_lockLegalHoldOn(rName, retainUntilDate), Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), + resource.TestCheckNoResourceAttr(dataSourceName, "body"), resource.TestCheckResourceAttr(dataSourceName, "content_length", "11"), resource.TestCheckResourceAttrPair(dataSourceName, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName, "etag", resourceName, "etag"), @@ -310,7 +265,6 @@ func TestAccS3ObjectDataSource_objectLockLegalHoldOn(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_legal_hold_status", resourceName, "object_lock_legal_hold_status"), 
resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_mode", resourceName, "object_lock_mode"), resource.TestCheckResourceAttrPair(dataSourceName, "object_lock_retain_until_date", resourceName, "object_lock_retain_until_date"), - resource.TestCheckNoResourceAttr(dataSourceName, "body"), ), }, }, @@ -319,52 +273,43 @@ func TestAccS3ObjectDataSource_objectLockLegalHoldOn(t *testing.T) { func TestAccS3ObjectDataSource_leadingSlash(t *testing.T) { ctx := acctest.Context(t) - var rObj s3.GetObjectOutput - var dsObj1, dsObj2, dsObj3 s3.GetObjectOutput - - resourceName := "aws_s3_object.object" - dataSourceName1 := "data.aws_s3_object.obj1" - dataSourceName2 := "data.aws_s3_object.obj2" - dataSourceName3 := "data.aws_s3_object.obj3" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName1 := "data.aws_s3_object.test1" + dataSourceName2 := "data.aws_s3_object.test2" + dataSourceName3 := "data.aws_s3_object.test3" - rInt := sdkacctest.RandInt() - resourceOnlyConf, conf := testAccObjectDataSourceConfig_leadingSlash(rInt) + resourceOnlyConf, conf := testAccObjectDataSourceConfig_leadingSlash(rName) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { // nosemgrep:ci.test-config-funcs-correct-form Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &rObj), - ), }, { // nosemgrep:ci.test-config-funcs-correct-form Config: conf, Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExistsDataSource(ctx, dataSourceName1, &dsObj1), + resource.TestCheckResourceAttr(dataSourceName1, "body", "yes"), resource.TestCheckResourceAttr(dataSourceName1, "content_length", "3"), 
resource.TestCheckResourceAttrPair(dataSourceName1, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName1, "etag", resourceName, "etag"), resource.TestMatchResourceAttr(dataSourceName1, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), - resource.TestCheckResourceAttr(dataSourceName1, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName2, &dsObj2), + resource.TestCheckResourceAttr(dataSourceName2, "body", "yes"), resource.TestCheckResourceAttr(dataSourceName2, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName2, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName2, "etag", resourceName, "etag"), resource.TestMatchResourceAttr(dataSourceName2, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), - resource.TestCheckResourceAttr(dataSourceName2, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName3, &dsObj3), + resource.TestCheckResourceAttr(dataSourceName3, "body", "yes"), resource.TestCheckResourceAttr(dataSourceName3, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName3, "content_type", resourceName, "content_type"), resource.TestCheckResourceAttrPair(dataSourceName3, "etag", resourceName, "etag"), resource.TestMatchResourceAttr(dataSourceName3, "last_modified", regexache.MustCompile(rfc1123RegexPattern)), - resource.TestCheckResourceAttr(dataSourceName3, "body", "yes"), ), }, }, @@ -373,49 +318,38 @@ func TestAccS3ObjectDataSource_leadingSlash(t *testing.T) { func TestAccS3ObjectDataSource_multipleSlashes(t *testing.T) { ctx := acctest.Context(t) - var rObj1, rObj2 s3.GetObjectOutput - var dsObj1, dsObj2, dsObj3 s3.GetObjectOutput - - resourceName1 := "aws_s3_object.object1" - resourceName2 := "aws_s3_object.object2" - dataSourceName1 := "data.aws_s3_object.obj1" - dataSourceName2 := "data.aws_s3_object.obj2" - dataSourceName3 := "data.aws_s3_object.obj3" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName1 := "aws_s3_object.test1" + resourceName2 := "aws_s3_object.test2" + dataSourceName1 := "data.aws_s3_object.test1" + dataSourceName2 := "data.aws_s3_object.test2" + dataSourceName3 := "data.aws_s3_object.test3" - rInt := sdkacctest.RandInt() - resourceOnlyConf, conf := testAccObjectDataSourceConfig_multipleSlashes(rInt) + resourceOnlyConf, conf := testAccObjectDataSourceConfig_multipleSlashes(rName) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { // nosemgrep:ci.test-config-funcs-correct-form Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName1, &rObj1), - testAccCheckObjectExists(ctx, resourceName2, &rObj2), - ), }, { // nosemgrep:ci.test-config-funcs-correct-form Config: conf, Check: resource.ComposeTestCheckFunc( - - testAccCheckObjectExistsDataSource(ctx, dataSourceName1, &dsObj1), + resource.TestCheckResourceAttr(dataSourceName1, "body", "yes"), resource.TestCheckResourceAttr(dataSourceName1, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName1, "content_type", resourceName1, "content_type"), - resource.TestCheckResourceAttr(dataSourceName1, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName2, &dsObj2), + resource.TestCheckResourceAttr(dataSourceName2, "body", "yes"), resource.TestCheckResourceAttr(dataSourceName2, "content_length", "3"), resource.TestCheckResourceAttrPair(dataSourceName2, "content_type", resourceName1, "content_type"), - resource.TestCheckResourceAttr(dataSourceName2, "body", "yes"), - testAccCheckObjectExistsDataSource(ctx, dataSourceName3, &dsObj3), + resource.TestCheckResourceAttr(dataSourceName3, 
"body", "no"), resource.TestCheckResourceAttr(dataSourceName3, "content_length", "2"), resource.TestCheckResourceAttrPair(dataSourceName3, "content_type", resourceName2, "content_type"), - resource.TestCheckResourceAttr(dataSourceName3, "body", "no"), ), }, }, @@ -424,70 +358,66 @@ func TestAccS3ObjectDataSource_multipleSlashes(t *testing.T) { func TestAccS3ObjectDataSource_singleSlashAsKey(t *testing.T) { ctx := acctest.Context(t) - var dsObj s3.GetObjectOutput - dataSourceName := "data.aws_s3_object.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, PreventPostDestroyRefresh: true, Steps: []resource.TestStep{ { - Config: testAccObjectDataSourceConfig_singleSlashAsKey(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckObjectExistsDataSource(ctx, dataSourceName, &dsObj), - ), + Config: testAccObjectDataSourceConfig_singleSlashAsKey(rName), + ExpectError: regexache.MustCompile(`input member Key must not be empty`), }, }, }) } -func testAccCheckObjectExistsDataSource(ctx context.Context, n string, obj *s3.GetObjectOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find S3 object data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("S3 object data source ID not set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - out, err := conn.GetObjectWithContext(ctx, &s3.GetObjectInput{ - Bucket: aws.String(rs.Primary.Attributes["bucket"]), - Key: aws.String(rs.Primary.Attributes["key"]), - }) - if err != nil { - return fmt.Errorf("Failed getting S3 Object from %s: %s", - rs.Primary.Attributes["bucket"]+"/"+rs.Primary.Attributes["key"], err) - } - - 
*obj = *out +func TestAccS3ObjectDataSource_checksumMode(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.test" + dataSourceName := "data.aws_s3_object.test" - return nil - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: testAccObjectDataSourceConfig_checksumMode(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "checksum_mode", "ENABLED"), + resource.TestCheckResourceAttrPair(dataSourceName, "checksum_crc32", resourceName, "checksum_crc32"), + resource.TestCheckResourceAttrPair(dataSourceName, "checksum_crc32c", resourceName, "checksum_crc32c"), + resource.TestCheckResourceAttrPair(dataSourceName, "checksum_sha1", resourceName, "checksum_sha1"), + resource.TestCheckResourceAttrPair(dataSourceName, "checksum_sha256", resourceName, "checksum_sha256"), + resource.TestCheckResourceAttrSet(dataSourceName, "checksum_sha256"), + ), + }, + }, + }) } -func testAccObjectDataSourceConfig_basic(randInt int) string { +func testAccObjectDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" +resource "aws_s3_bucket" "test" { + bucket = %[1]q } -resource "aws_s3_object" "object" { - bucket = aws_s3_bucket.object_bucket.bucket - key = "tf-testing-obj-%[1]d" +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "%[1]s-key" content = "Hello World" } -data "aws_s3_object" "obj" { - bucket = aws_s3_bucket.object_bucket.bucket - key = aws_s3_object.object.key +data "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = aws_s3_object.test.key } -`, randInt) +`, rName) } 
func testAccObjectDataSourceConfig_basicViaAccessPoint(rName string) string { @@ -503,7 +433,7 @@ resource "aws_s3_access_point" "test" { resource "aws_s3_object" "test" { bucket = aws_s3_bucket.test.bucket - key = %[1]q + key = "%[1]s-key" content = "Hello World" } @@ -514,98 +444,98 @@ data "aws_s3_object" "test" { `, rName) } -func testAccObjectDataSourceConfig_readableBody(randInt int) string { +func testAccObjectDataSourceConfig_readableBody(rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" +resource "aws_s3_bucket" "test" { + bucket = %[1]q } -resource "aws_s3_object" "object" { - bucket = aws_s3_bucket.object_bucket.bucket - key = "tf-testing-obj-%[1]d-readable" +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "%[1]s-key" content = "yes" content_type = "text/plain" } -data "aws_s3_object" "obj" { - bucket = aws_s3_bucket.object_bucket.bucket - key = aws_s3_object.object.key +data "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = aws_s3_object.test.key } -`, randInt) +`, rName) } -func testAccObjectDataSourceConfig_kmsEncrypted(randInt int) string { +func testAccObjectDataSourceConfig_kmsEncrypted(rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" +resource "aws_s3_bucket" "test" { + bucket = %[1]q } -resource "aws_kms_key" "example" { - description = "TF Acceptance Test KMS key" +resource "aws_kms_key" "test" { + description = %[1]q deletion_window_in_days = 7 } -resource "aws_s3_object" "object" { - bucket = aws_s3_bucket.object_bucket.bucket - key = "tf-testing-obj-%[1]d-encrypted" +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "%[1]s-key" content = "Keep Calm and Carry On" content_type = "text/plain" - kms_key_id = aws_kms_key.example.arn + kms_key_id = aws_kms_key.test.arn } -data "aws_s3_object" "obj" { - bucket = 
aws_s3_bucket.object_bucket.bucket - key = aws_s3_object.object.key +data "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = aws_s3_object.test.key } -`, randInt) +`, rName) } -func testAccObjectDataSourceConfig_bucketKeyEnabled(randInt int) string { +func testAccObjectDataSourceConfig_bucketKeyEnabled(rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" +resource "aws_s3_bucket" "test" { + bucket = %[1]q } -resource "aws_kms_key" "example" { - description = "TF Acceptance Test KMS key" +resource "aws_kms_key" "test" { + description = %[1]q deletion_window_in_days = 7 } -resource "aws_s3_object" "object" { - bucket = aws_s3_bucket.object_bucket.bucket - key = "tf-testing-obj-%[1]d-encrypted" +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "%[1]s-key" content = "Keep Calm and Carry On" content_type = "text/plain" - kms_key_id = aws_kms_key.example.arn + kms_key_id = aws_kms_key.test.arn bucket_key_enabled = true } -data "aws_s3_object" "obj" { - bucket = aws_s3_bucket.object_bucket.bucket - key = aws_s3_object.object.key +data "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = aws_s3_object.test.key } -`, randInt) +`, rName) } -func testAccObjectDataSourceConfig_allParams(randInt int) string { +func testAccObjectDataSourceConfig_allParams(rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" +resource "aws_s3_bucket" "test" { + bucket = %[1]q } -resource "aws_s3_bucket_versioning" "object_bucket" { - bucket = aws_s3_bucket.object_bucket.id +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_bucket.test.id versioning_configuration { status = "Enabled" } } -resource "aws_s3_object" "object" { +resource "aws_s3_object" "test" { # Must have bucket versioning enabled first - depends_on = [aws_s3_bucket_versioning.object_bucket] + depends_on = 
[aws_s3_bucket_versioning.test] - bucket = aws_s3_bucket.object_bucket.bucket - key = "tf-testing-obj-%[1]d-all-params" + bucket = aws_s3_bucket.test.bucket + key = "%[1]s-key" content = < 0 { + input := &s3_sdkv2.PutObjectTaggingInput{ + Bucket: aws_sdkv2.String(bucket), + Key: aws_sdkv2.String(key), + Tagging: &s3types_sdkv2.Tagging{ + TagSet: tagsV2(newTags.Merge(ignoredTags)), + }, + } + + _, err := conn.PutObjectTagging(ctx, input) + + if err != nil { + return fmt.Errorf("setting resource tags (%s/%s): %w", bucket, key, err) + } + } else if len(oldTags) > 0 && len(ignoredTags) == 0 { + input := &s3_sdkv2.DeleteObjectTaggingInput{ + Bucket: aws_sdkv2.String(bucket), + Key: aws_sdkv2.String(key), + } + + _, err := conn.DeleteObjectTagging(ctx, input) + + if err != nil { + return fmt.Errorf("deleting resource tags (%s/%s): %w", bucket, key, err) + } + } + + return nil } // ObjectListTagsV1 lists S3 object tags (AWS SDK for Go v1). @@ -135,8 +177,8 @@ func ObjectListTagsV1(ctx context.Context, conn s3iface_sdkv1.S3API, bucket, key return KeyValueTags(ctx, outputRaw.(*s3_sdkv1.GetObjectTaggingOutput).TagSet), nil } -// ObjectUpdateTags updates S3 object tags. -func ObjectUpdateTags(ctx context.Context, conn s3iface_sdkv1.S3API, bucket, key string, oldTagsMap, newTagsMap any) error { +// ObjectUpdateTagsV1 updates S3 object tags (AWS SDK for Go v1). +func ObjectUpdateTagsV1(ctx context.Context, conn s3iface_sdkv1.S3API, bucket, key string, oldTagsMap, newTagsMap any) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) diff --git a/website/docs/d/s3_object.html.markdown b/website/docs/d/s3_object.html.markdown index c5d04996880..e692408ea86 100644 --- a/website/docs/d/s3_object.html.markdown +++ b/website/docs/d/s3_object.html.markdown @@ -58,6 +58,7 @@ resource "aws_lambda_function" "test_lambda" { This data source supports the following arguments: * `bucket` - (Required) Name of the bucket to read the object from. 
Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified +* `checksum_mode` - (Optional) To retrieve the object's checksum, this argument must be `ENABLED`. If you enable `checksum_mode` and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `ENABLED` * `key` - (Required) Full path to the object inside the bucket * `version_id` - (Optional) Specific version ID of the object returned (defaults to latest version) @@ -68,6 +69,10 @@ This data source exports the following attributes in addition to the arguments a * `body` - Object data (see **limitations above** to understand cases in which this field is actually available) * `bucket_key_enabled` - (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. * `cache_control` - Caching behavior along the request/reply chain. +* `checksum_crc32` - The base64-encoded, 32-bit CRC32 checksum of the object. +* `checksum_crc32c` - The base64-encoded, 32-bit CRC32C checksum of the object. +* `checksum_sha1` - The base64-encoded, 160-bit SHA-1 digest of the object. +* `checksum_sha256` - The base64-encoded, 256-bit SHA-256 digest of the object. * `content_disposition` - Presentational information for the object. * `content_encoding` - What content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. * `content_language` - Language the content is in. 
diff --git a/website/docs/r/s3_object.html.markdown b/website/docs/r/s3_object.html.markdown index fe982bc9e11..9bb45d8f794 100644 --- a/website/docs/r/s3_object.html.markdown +++ b/website/docs/r/s3_object.html.markdown @@ -143,6 +143,7 @@ The following arguments are optional: * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. * `bucket_key_enabled` - (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. * `cache_control` - (Optional) Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. +* `checksum_algorithm` - (Optional) Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. * `content_base64` - (Optional, conflicts with `source` and `content`) Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file. * `content_disposition` - (Optional) Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information. * `content_encoding` - (Optional) Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. 
Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information. @@ -171,6 +172,10 @@ If no content is provided through `source`, `content` or `content_base64`, then This resource exports the following attributes in addition to the arguments above: +* `checksum_crc32` - The base64-encoded, 32-bit CRC32 checksum of the object. +* `checksum_crc32c` - The base64-encoded, 32-bit CRC32C checksum of the object. +* `checksum_sha1` - The base64-encoded, 160-bit SHA-1 digest of the object. +* `checksum_sha256` - The base64-encoded, 256-bit SHA-256 digest of the object. * `etag` - ETag generated for the object (an MD5 sum of the object content). For plaintext objects or objects encrypted with an AWS-managed key, the hash is an MD5 digest of the object data. For objects encrypted with a KMS key or objects created by either the Multipart Upload or Part Copy operation, the hash is not an MD5 digest, regardless of the method of encryption. More information on possible values can be found on [Common Response Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html). * `id` - `key` of the resource supplied above * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). diff --git a/website/docs/r/s3_object_copy.html.markdown b/website/docs/r/s3_object_copy.html.markdown index 6944ac3f48b..bb89f797585 100644 --- a/website/docs/r/s3_object_copy.html.markdown +++ b/website/docs/r/s3_object_copy.html.markdown @@ -38,6 +38,7 @@ The following arguments are optional: * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. 
Valid values are `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Conflicts with `grant`. * `cache_control` - (Optional) Specifies caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. +* `checksum_algorithm` - (Optional) Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. * `content_disposition` - (Optional) Specifies presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information. * `content_encoding` - (Optional) Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information. * `content_language` - (Optional) Language the content is in e.g., en-US or en-GB. @@ -92,6 +93,10 @@ This configuration block has the following optional arguments (one of the three This resource exports the following attributes in addition to the arguments above: +* `checksum_crc32` - The base64-encoded, 32-bit CRC32 checksum of the object. +* `checksum_crc32c` - The base64-encoded, 32-bit CRC32C checksum of the object. +* `checksum_sha1` - The base64-encoded, 160-bit SHA-1 digest of the object. +* `checksum_sha256` - The base64-encoded, 256-bit SHA-256 digest of the object. * `etag` - ETag generated for the object (an MD5 sum of the object content). For plaintext objects or objects encrypted with an AWS-managed key, the hash is an MD5 digest of the object data. 
For objects encrypted with a KMS key or objects created by either the Multipart Upload or Part Copy operation, the hash is not an MD5 digest, regardless of the method of encryption. More information on possible values can be found on [Common Response Headers](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html). * `expiration` - If the object expiration is configured, this attribute will be set. * `id` - The `key` of the resource supplied above.