From f363930c4a1e945507a839f7724d007f65452203 Mon Sep 17 00:00:00 2001 From: megan07 Date: Wed, 24 Jul 2019 18:19:16 +0000 Subject: [PATCH] Add retention_policy to storage_bucket Signed-off-by: Modular Magician --- google/resource_storage_bucket.go | 150 +++++++++++++++++++- google/resource_storage_bucket_test.go | 124 ++++++++++++++++ website/docs/r/storage_bucket.html.markdown | 8 ++ 3 files changed, 279 insertions(+), 3 deletions(-) diff --git a/google/resource_storage_bucket.go b/google/resource_storage_bucket.go index f199ca8dad1..00d2bd1bf82 100644 --- a/google/resource_storage_bucket.go +++ b/google/resource_storage_bucket.go @@ -11,6 +11,7 @@ import ( "time" "github.com/gammazero/workerpool" + "github.com/hashicorp/terraform/helper/customdiff" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -29,6 +30,8 @@ func resourceStorageBucket() *schema.Resource { Importer: &schema.ResourceImporter{ State: resourceStorageBucketStateImporter, }, + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked)), Schema: map[string]*schema.Schema{ "name": { @@ -211,6 +214,26 @@ func resourceStorageBucket() *schema.Resource { }, }, + "retention_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_locked": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "retention_period": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 3155760000), + }, + }, + }, + }, + "cors": { Type: schema.TypeList, Optional: true, @@ -271,6 +294,20 @@ func resourceStorageBucket() *schema.Resource { } } +// Is the old bucket retention policy locked? 
+func isPolicyLocked(old, new, _ interface{}) bool { + if old == nil || new == nil { + return false + } + + // if the old policy is locked, but the new policy is not + if old.(bool) && !new.(bool) { + return true + } + + return false +} + func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -309,6 +346,18 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error sb.Website = expandBucketWebsite(v.([]interface{})) } + if v, ok := d.GetOk("retention_policy"); ok { + retention_policies := v.([]interface{}) + + sb.RetentionPolicy = &storage.BucketRetentionPolicy{} + + retentionPolicy := retention_policies[0].(map[string]interface{}) + + if v, ok := retentionPolicy["retention_period"]; ok { + sb.RetentionPolicy.RetentionPeriod = int64(v.(int)) + } + } + if v, ok := d.GetOk("cors"); ok { sb.Cors = expandCors(v.([]interface{})) } @@ -340,8 +389,25 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error } log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) - d.SetId(res.Id) + + if v, ok := d.GetOk("retention_policy"); ok { + retention_policies := v.([]interface{}) + + sb.RetentionPolicy = &storage.BucketRetentionPolicy{} + + retentionPolicy := retention_policies[0].(map[string]interface{}) + + if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) { + err = lockRetentionPolicy(config.clientStorage.Buckets, bucket, res.Metageneration) + if err != nil { + return err + } + + log.Printf("[DEBUG] Locked bucket %v at location %v\n\n", res.Name, res.SelfLink) + } + } + return resourceStorageBucketRead(d, meta) } @@ -376,6 +442,14 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error sb.Website = expandBucketWebsite(d.Get("website")) } + if d.HasChange("retention_policy") { + if v, ok := d.GetOk("retention_policy"); ok { + sb.RetentionPolicy = expandBucketRetentionPolicy(v.([]interface{})) + } else 
{ + sb.NullFields = append(sb.NullFields, "RetentionPolicy") + } + } + if v, ok := d.GetOk("cors"); ok { sb.Cors = expandCors(v.([]interface{})) } @@ -428,10 +502,28 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error return err } - log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) - // Assign the bucket ID as the resource ID d.Set("self_link", res.SelfLink) + + if d.HasChange("retention_policy") { + if v, ok := d.GetOk("retention_policy"); ok { + retention_policies := v.([]interface{}) + + sb.RetentionPolicy = &storage.BucketRetentionPolicy{} + + retentionPolicy := retention_policies[0].(map[string]interface{}) + + if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) { + err = lockRetentionPolicy(config.clientStorage.Buckets, d.Get("name").(string), res.Metageneration) + if err != nil { + return err + } + } + } + } + + log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) + d.SetId(res.Id) return nil @@ -481,6 +573,7 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { d.Set("versioning", flattenBucketVersioning(res.Versioning)) d.Set("lifecycle_rule", flattenBucketLifecycle(res.Lifecycle)) d.Set("labels", res.Labels) + d.Set("retention_policy", flattenBucketRetentionPolicy(res.RetentionPolicy)) d.Set("website", flattenBucketWebsite(res.Website)) if res.IamConfiguration != nil && res.IamConfiguration.BucketPolicyOnly != nil { @@ -513,6 +606,20 @@ func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error } if len(res.Items) != 0 { + if d.Get("retention_policy.0.is_locked").(bool) { + for _, item := range res.Items { + expiration, err := time.Parse(time.RFC3339, item.RetentionExpirationTime) + if err != nil { + return err + } + if expiration.After(time.Now()) { + deleteErr := errors.New("Bucket '" + d.Get("name").(string) + "' contains objects that have not met the retention period yet and cannot be 
deleted.") + log.Printf("Error! %s : %s\n\n", bucket, deleteErr) + return deleteErr + } + } + } + if d.Get("force_destroy").(bool) { // GCS requires that a bucket be empty (have no objects or object // versions) before it can be deleted. @@ -686,6 +793,34 @@ func flattenBucketLogging(bucketLogging *storage.BucketLogging) []map[string]int return loggings } +func expandBucketRetentionPolicy(configured interface{}) *storage.BucketRetentionPolicy { + retentionPolicies := configured.([]interface{}) + retentionPolicy := retentionPolicies[0].(map[string]interface{}) + + bucketRetentionPolicy := &storage.BucketRetentionPolicy{ + IsLocked: retentionPolicy["is_locked"].(bool), + RetentionPeriod: int64(retentionPolicy["retention_period"].(int)), + } + + return bucketRetentionPolicy +} + +func flattenBucketRetentionPolicy(bucketRetentionPolicy *storage.BucketRetentionPolicy) []map[string]interface{} { + bucketRetentionPolicies := make([]map[string]interface{}, 0, 1) + + if bucketRetentionPolicy == nil { + return bucketRetentionPolicies + } + + retentionPolicy := map[string]interface{}{ + "is_locked": bucketRetentionPolicy.IsLocked, + "retention_period": bucketRetentionPolicy.RetentionPeriod, + } + + bucketRetentionPolicies = append(bucketRetentionPolicies, retentionPolicy) + return bucketRetentionPolicies +} + func expandBucketVersioning(configured interface{}) *storage.BucketVersioning { versionings := configured.([]interface{}) versioning := versionings[0].(map[string]interface{}) @@ -1028,3 +1163,12 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { return hashcode.String(buf.String()) } + +func lockRetentionPolicy(bucketsService *storage.BucketsService, bucketName string, metageneration int64) error { + lockPolicyCall := bucketsService.LockRetentionPolicy(bucketName, metageneration) + if _, err := lockPolicyCall.Do(); err != nil { + return err + } + + return nil +} diff --git a/google/resource_storage_bucket_test.go 
b/google/resource_storage_bucket_test.go index bdfe351b781..bc613189a42 100644 --- a/google/resource_storage_bucket_test.go +++ b/google/resource_storage_bucket_test.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "testing" + "time" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" @@ -836,6 +837,34 @@ func TestAccStorageBucket_labels(t *testing.T) { }) } +func TestAccStorageBucket_retentionPolicy(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_retentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketRetentionPolicy(bucketName), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccStorageBucket_website(t *testing.T) { t.Parallel() @@ -858,6 +887,43 @@ func TestAccStorageBucket_website(t *testing.T) { }) } +func TestAccStorageBucket_retentionPolicyLocked(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + var newBucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lockedRetentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketRetentionPolicy(bucketName), + ), + }, + { + ResourceName: 
"google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccStorageBucket_retentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &newBucket), + testAccCheckStorageBucketWasRecreated(&newBucket, &bucket), + ), + }, + }, + }) +} + func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -926,6 +992,39 @@ func testAccCheckStorageBucketPutItem(bucketName string) resource.TestCheckFunc } } +func testAccCheckStorageBucketRetentionPolicy(bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + data := bytes.NewBufferString("test") + dataReader := bytes.NewReader(data.Bytes()) + object := &storage.Object{Name: "bucketDestroyTestFile"} + + // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails + if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { + log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Objects.Insert failed: %v", err) + } + + // Test deleting immediately, this should fail because of the 10 second retention + if err := config.clientStorage.Objects.Delete(bucketName, objectName).Do(); err == nil { + return fmt.Errorf("Objects.Delete succeeded: %v", object.Name) + } + + // Wait 10 seconds and delete again + time.Sleep(10000 * time.Millisecond) + + if err := config.clientStorage.Objects.Delete(bucketName, object.Name).Do(); err == nil { + log.Printf("[INFO] Deleted object %v at location %v\n\n", object.Name, object.SelfLink) + } else { + return fmt.Errorf("Objects.Delete failed: %v", err) + } + + return nil + } +} + func 
testAccCheckStorageBucketMissing(bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -1363,6 +1462,31 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } +func testAccStorageBucket_retentionPolicy(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + + retention_policy { + retention_period = 10 + } +} +`, bucketName) +} + +func testAccStorageBucket_lockedRetentionPolicy(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + + retention_policy { + is_locked = true + retention_period = 10 + } +} +`, bucketName) +} + func testAccStorageBucket_website(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "website" { diff --git a/website/docs/r/storage_bucket.html.markdown b/website/docs/r/storage_bucket.html.markdown index b09ecc87937..c3309d744e4 100644 --- a/website/docs/r/storage_bucket.html.markdown +++ b/website/docs/r/storage_bucket.html.markdown @@ -65,6 +65,8 @@ The following arguments are supported: * `cors` - (Optional) The bucket's [Cross-Origin Resource Sharing (CORS)](https://www.w3.org/TR/cors/) configuration. Multiple blocks of this type are permitted. Structure is documented below. +* `retention_policy` - (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Structure is documented below. + * `labels` - (Optional) A set of key/value label pairs to assign to the bucket. * `logging` - (Optional) The bucket's [Access & Storage Logs](https://cloud.google.com/storage/docs/access-logs) configuration. @@ -123,6 +125,12 @@ The `cors` block supports: * `max_age_seconds` - (Optional) The value, in seconds, to return in the [Access-Control-Max-Age header](https://www.w3.org/TR/cors/#access-control-max-age-response-header) used in preflight responses. 
+The `retention_policy` block supports: + +* `is_locked` - (Optional) If set to `true`, the bucket will be [locked](https://cloud.google.com/storage/docs/using-bucket-lock#lock-bucket) and permanently restrict edits to the bucket's retention policy. Caution: Locking a bucket is an irreversible action. + +* `retention_period` - (Required) The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds. + The `logging` block supports: * `log_bucket` - (Required) The bucket that will receive log objects.