r/s3_bucket: deprecate 'logging'
anGie44 committed Jan 27, 2022
1 parent 396c6aa commit 47cac55
Showing 3 changed files with 34 additions and 178 deletions.
96 changes: 29 additions & 67 deletions internal/service/s3/bucket.go
@@ -244,26 +244,22 @@ func ResourceBucket() *schema.Resource {
 
             "logging": {
                 Type:     schema.TypeSet,
-                Optional: true,
+                Computed: true,
                 Elem: &schema.Resource{
                     Schema: map[string]*schema.Schema{
                         "target_bucket": {
-                            Type:     schema.TypeString,
-                            Required: true,
+                            Type:       schema.TypeString,
+                            Computed:   true,
+                            Deprecated: "Use the aws_s3_bucket_logging resource instead",
                         },
                         "target_prefix": {
-                            Type:     schema.TypeString,
-                            Optional: true,
+                            Type:       schema.TypeString,
+                            Computed:   true,
+                            Deprecated: "Use the aws_s3_bucket_logging resource instead",
                         },
                     },
                 },
-                Set: func(v interface{}) int {
-                    var buf bytes.Buffer
-                    m := v.(map[string]interface{})
-                    buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
-                    buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
-                    return create.StringHashcode(buf.String())
-                },
+                Deprecated: "Use the aws_s3_bucket_logging resource instead",
             },
 
             "lifecycle_rule": {
@@ -805,12 +801,6 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error {
         }
     }
 
-    if d.HasChange("logging") {
-        if err := resourceBucketLoggingUpdate(conn, d); err != nil {
-            return err
-        }
-    }
-
     if d.HasChange("lifecycle_rule") {
         if err := resourceBucketLifecycleUpdate(conn, d); err != nil {
             return err
@@ -1109,7 +1099,7 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error {
         d.Set("request_payer", payer.Payer)
     }
 
-    // Read the logging configuration
+    // Read the logging configuration if configured outside this resource
     loggingResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
         return conn.GetBucketLogging(&s3.GetBucketLoggingInput{
             Bucket: aws.String(d.Id()),
@@ -1120,20 +1110,10 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error {
         return fmt.Errorf("error getting S3 Bucket logging: %s", err)
     }
 
-    lcl := make([]map[string]interface{}, 0, 1)
-    if logging, ok := loggingResponse.(*s3.GetBucketLoggingOutput); ok && logging.LoggingEnabled != nil {
-        v := logging.LoggingEnabled
-        lc := make(map[string]interface{})
-        if aws.StringValue(v.TargetBucket) != "" {
-            lc["target_bucket"] = aws.StringValue(v.TargetBucket)
+    if logging, ok := loggingResponse.(*s3.GetBucketLoggingOutput); ok {
+        if err := d.Set("logging", flattenBucketLoggingEnabled(logging.LoggingEnabled)); err != nil {
+            return fmt.Errorf("error setting logging: %s", err)
         }
-        if aws.StringValue(v.TargetPrefix) != "" {
-            lc["target_prefix"] = aws.StringValue(v.TargetPrefix)
-        }
-        lcl = append(lcl, lc)
-    }
-    if err := d.Set("logging", lcl); err != nil {
-        return fmt.Errorf("error setting logging: %s", err)
     }
 
     // Read the lifecycle configuration
@@ -1868,41 +1848,6 @@ func resourceBucketInternalVersioningUpdate(conn *s3.S3, bucket string, versioni
     return nil
 }
 
-func resourceBucketLoggingUpdate(conn *s3.S3, d *schema.ResourceData) error {
-    logging := d.Get("logging").(*schema.Set).List()
-    bucket := d.Get("bucket").(string)
-    loggingStatus := &s3.BucketLoggingStatus{}
-
-    if len(logging) > 0 {
-        c := logging[0].(map[string]interface{})
-
-        loggingEnabled := &s3.LoggingEnabled{}
-        if val, ok := c["target_bucket"]; ok {
-            loggingEnabled.TargetBucket = aws.String(val.(string))
-        }
-        if val, ok := c["target_prefix"]; ok {
-            loggingEnabled.TargetPrefix = aws.String(val.(string))
-        }
-
-        loggingStatus.LoggingEnabled = loggingEnabled
-    }
-
-    i := &s3.PutBucketLoggingInput{
-        Bucket:              aws.String(bucket),
-        BucketLoggingStatus: loggingStatus,
-    }
-    log.Printf("[DEBUG] S3 put bucket logging: %#v", i)
-
-    _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
-        return conn.PutBucketLogging(i)
-    })
-    if err != nil {
-        return fmt.Errorf("Error putting S3 logging: %s", err)
-    }
-
-    return nil
-}
-
 func resourceBucketAccelerationUpdate(conn *s3.S3, d *schema.ResourceData) error {
     bucket := d.Get("bucket").(string)
     enableAcceleration := d.Get("acceleration_status").(string)
@@ -2371,6 +2316,23 @@ func resourceBucketLifecycleUpdate(conn *s3.S3, d *schema.ResourceData) error {
     return nil
 }
 
+func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{} {
+    if loggingEnabled == nil {
+        return []interface{}{}
+    }
+
+    m := make(map[string]interface{})
+
+    if loggingEnabled.TargetBucket != nil {
+        m["target_bucket"] = aws.StringValue(loggingEnabled.TargetBucket)
+    }
+    if loggingEnabled.TargetPrefix != nil {
+        m["target_prefix"] = aws.StringValue(loggingEnabled.TargetPrefix)
+    }
+
+    return []interface{}{m}
+}
+
 func flattenServerSideEncryptionConfiguration(c *s3.ServerSideEncryptionConfiguration) []map[string]interface{} {
     var encryptionConfiguration []map[string]interface{}
     rules := make([]interface{}, 0, len(c.Rules))
89 changes: 0 additions & 89 deletions internal/service/s3/bucket_test.go
@@ -1225,33 +1225,6 @@ func TestAccS3Bucket_Security_corsEmptyOrigin(t *testing.T) {
     })
 }
 
-func TestAccS3Bucket_Security_logging(t *testing.T) {
-    bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket")
-    resourceName := "aws_s3_bucket.bucket"
-
-    resource.ParallelTest(t, resource.TestCase{
-        PreCheck:     func() { acctest.PreCheck(t) },
-        ErrorCheck:   acctest.ErrorCheck(t, s3.EndpointsID),
-        Providers:    acctest.Providers,
-        CheckDestroy: testAccCheckBucketDestroy,
-        Steps: []resource.TestStep{
-            {
-                Config: testAccBucketWithLoggingConfig(bucketName),
-                Check: resource.ComposeTestCheckFunc(
-                    testAccCheckBucketExists(resourceName),
-                    testAccCheckBucketLogging(resourceName, "aws_s3_bucket.log_bucket", "log/"),
-                ),
-            },
-            {
-                ResourceName:            resourceName,
-                ImportState:             true,
-                ImportStateVerify:       true,
-                ImportStateVerifyIgnore: []string{"force_destroy", "acl"},
-            },
-        },
-    })
-}
-
 func TestAccS3Bucket_Manage_lifecycleBasic(t *testing.T) {
     bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket")
     resourceName := "aws_s3_bucket.bucket"
@@ -3278,49 +3251,6 @@ func testAccCheckRequestPayer(n, expectedPayer string) resource.TestCheckFunc {
     }
 }
 
-func testAccCheckBucketLogging(n, b, p string) resource.TestCheckFunc {
-    return func(s *terraform.State) error {
-        rs := s.RootModule().Resources[n]
-        conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn
-
-        out, err := conn.GetBucketLogging(&s3.GetBucketLoggingInput{
-            Bucket: aws.String(rs.Primary.ID),
-        })
-
-        if err != nil {
-            return fmt.Errorf("GetBucketLogging error: %v", err)
-        }
-
-        if out.LoggingEnabled == nil {
-            return fmt.Errorf("logging not enabled for bucket: %s", rs.Primary.ID)
-        }
-
-        tb := s.RootModule().Resources[b]
-
-        if v := out.LoggingEnabled.TargetBucket; v == nil {
-            if tb.Primary.ID != "" {
-                return fmt.Errorf("bad target bucket, found nil, expected: %s", tb.Primary.ID)
-            }
-        } else {
-            if *v != tb.Primary.ID {
-                return fmt.Errorf("bad target bucket, expected: %s, got %s", tb.Primary.ID, *v)
-            }
-        }
-
-        if v := out.LoggingEnabled.TargetPrefix; v == nil {
-            if p != "" {
-                return fmt.Errorf("bad target prefix, found nil, expected: %s", p)
-            }
-        } else {
-            if *v != p {
-                return fmt.Errorf("bad target prefix, expected: %s, got %s", p, *v)
-            }
-        }
-
-        return nil
-    }
-}
-
 func testAccCheckBucketReplicationRules(n string, rules []*s3.ReplicationRule) resource.TestCheckFunc {
     return func(s *terraform.State) error {
         rs := s.RootModule().Resources[n]
@@ -3931,25 +3861,6 @@ resource "aws_s3_bucket" "bucket" {
 `, bucketName)
 }
 
-func testAccBucketWithLoggingConfig(bucketName string) string {
-    return fmt.Sprintf(`
-resource "aws_s3_bucket" "log_bucket" {
-  bucket = "%[1]s-log"
-  acl    = "log-delivery-write"
-}
-
-resource "aws_s3_bucket" "bucket" {
-  bucket = %[1]q
-  acl    = "private"
-
-  logging {
-    target_bucket = aws_s3_bucket.log_bucket.id
-    target_prefix = "log/"
-  }
-}
-`, bucketName)
-}
-
 func testAccBucketWithLifecycleConfig(bucketName string) string {
     return fmt.Sprintf(`
 resource "aws_s3_bucket" "bucket" {
27 changes: 5 additions & 22 deletions website/docs/r/s3_bucket.html.markdown
@@ -86,22 +86,8 @@ resource "aws_s3_bucket" "b" {
 
 ### Enable Logging
 
-```terraform
-resource "aws_s3_bucket" "log_bucket" {
-  bucket = "my-tf-log-bucket"
-  acl    = "log-delivery-write"
-}
-
-resource "aws_s3_bucket" "b" {
-  bucket = "my-tf-test-bucket"
-  acl    = "private"
-
-  logging {
-    target_bucket = aws_s3_bucket.log_bucket.id
-    target_prefix = "log/"
-  }
-}
-```
+The `logging` argument is read-only as of version 4.0.
+See the [`aws_s3_bucket_logging` resource](s3_bucket_logging.html.markdown) for configuration details.
 
 ### Using object lifecycle
 
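For orientation, here is a minimal sketch of the standalone configuration that the new doc text points to, assuming the `aws_s3_bucket_logging` resource's `bucket`, `target_bucket`, and `target_prefix` arguments; the resource names are illustrative, not taken from this commit:

```terraform
resource "aws_s3_bucket" "log_bucket" {
  bucket = "my-tf-log-bucket"
}

resource "aws_s3_bucket" "b" {
  bucket = "my-tf-test-bucket"
}

# Logging now lives on its own resource rather than as a block on the bucket.
resource "aws_s3_bucket_logging" "b" {
  bucket        = aws_s3_bucket.b.id
  target_bucket = aws_s3_bucket.log_bucket.id
  target_prefix = "log/"
}
```

This mirrors the Go changes above: the bucket resource drops its `PutBucketLogging` call and only reads the logging configuration back for drift detection.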
@@ -362,7 +348,6 @@ The following arguments are supported:
 * `website` - (Optional) A website object (documented below).
 * `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
 * `versioning` - (Optional) A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below)
-* `logging` - (Optional) A settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
 * `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
 * `acceleration_status` - (Optional) Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`.
 * `request_payer` - (Optional) Specifies who should bear the cost of Amazon S3 data transfer.
@@ -396,11 +381,6 @@ The `versioning` object supports the following:
 * `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket.
 * `mfa_delete` - (Optional) Enable MFA delete for either `Change the versioning state of your bucket` or `Permanently delete an object version`. Default is `false`. This cannot be used to toggle this setting but is available to allow managed buckets to reflect the state in AWS
 
-The `logging` object supports the following:
-
-* `target_bucket` - (Required) The name of the bucket that will receive the log objects.
-* `target_prefix` - (Optional) To specify a key prefix for log objects.
-
 The `lifecycle_rule` object supports the following:
 
 * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length.
@@ -559,6 +539,9 @@ In addition to all arguments above, the following attributes are exported:
 * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`.
 * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL.
 * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
+* `logging` - The [logging parameters](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) for the bucket.
+    * `target_bucket` - The name of the bucket that receives the log objects.
+    * `target_prefix` - The prefix for all log object keys.
 * `region` - The AWS region this bucket resides in.
 * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
 * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
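Since `logging` is now export-only on the bucket, a hedged sketch of consuming the computed attribute, assuming a single-element set and Terraform's `one()` function; the resource address is illustrative:

```terraform
# Surfaces the logging target read back from AWS; null when logging is not configured.
output "log_target_bucket" {
  value = one(aws_s3_bucket.b.logging[*].target_bucket)
}
```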
