Skip to content

Commit

Permalink
Add retention_policy to storage_bucket
Browse files Browse the repository at this point in the history
Signed-off-by: Modular Magician <[email protected]>
  • Loading branch information
megan07 authored and modular-magician committed Jul 24, 2019
1 parent c35a20b commit e20a35d
Show file tree
Hide file tree
Showing 3 changed files with 279 additions and 3 deletions.
150 changes: 147 additions & 3 deletions google/resource_storage_bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"time"

"github.com/gammazero/workerpool"
"github.com/hashicorp/terraform/helper/customdiff"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
Expand All @@ -29,6 +30,8 @@ func resourceStorageBucket() *schema.Resource {
Importer: &schema.ResourceImporter{
State: resourceStorageBucketStateImporter,
},
CustomizeDiff: customdiff.All(
customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked)),

Schema: map[string]*schema.Schema{
"name": {
Expand Down Expand Up @@ -211,6 +214,26 @@ func resourceStorageBucket() *schema.Resource {
},
},

"retention_policy": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"is_locked": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"retention_period": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 3155760000),
},
},
},
},

"cors": {
Type: schema.TypeList,
Optional: true,
Expand Down Expand Up @@ -271,6 +294,20 @@ func resourceStorageBucket() *schema.Resource {
}
}

// isPolicyLocked reports whether the bucket's retention policy is being
// unlocked — i.e. is_locked transitions from true to false. GCS cannot
// unlock a locked retention policy, so this condition is used by
// CustomizeDiff to force recreation of the bucket.
func isPolicyLocked(oldVal, newVal, _ interface{}) bool {
	if oldVal == nil || newVal == nil {
		return false
	}
	// Only the locked -> unlocked transition requires a new resource.
	return oldVal.(bool) && !newVal.(bool)
}

func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)

Expand Down Expand Up @@ -309,6 +346,18 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error
sb.Website = expandBucketWebsite(v.([]interface{}))
}

if v, ok := d.GetOk("retention_policy"); ok {
retention_policies := v.([]interface{})

sb.RetentionPolicy = &storage.BucketRetentionPolicy{}

retentionPolicy := retention_policies[0].(map[string]interface{})

if v, ok := retentionPolicy["retention_period"]; ok {
sb.RetentionPolicy.RetentionPeriod = int64(v.(int))
}
}

if v, ok := d.GetOk("cors"); ok {
sb.Cors = expandCors(v.([]interface{}))
}
Expand Down Expand Up @@ -340,8 +389,25 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error
}

log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink)

d.SetId(res.Id)

if v, ok := d.GetOk("retention_policy"); ok {
retention_policies := v.([]interface{})

sb.RetentionPolicy = &storage.BucketRetentionPolicy{}

retentionPolicy := retention_policies[0].(map[string]interface{})

if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) {
err = lockRetentionPolicy(config.clientStorage.Buckets, bucket, res.Metageneration)
if err != nil {
return err
}

log.Printf("[DEBUG] Locked bucket %v at location %v\n\n", res.Name, res.SelfLink)
}
}

return resourceStorageBucketRead(d, meta)
}

Expand Down Expand Up @@ -376,6 +442,14 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error
sb.Website = expandBucketWebsite(d.Get("website"))
}

if d.HasChange("retention_policy") {
if v, ok := d.GetOk("retention_policy"); ok {
sb.RetentionPolicy = expandBucketRetentionPolicy(v.([]interface{}))
} else {
sb.NullFields = append(sb.NullFields, "RetentionPolicy")
}
}

if v, ok := d.GetOk("cors"); ok {
sb.Cors = expandCors(v.([]interface{}))
}
Expand Down Expand Up @@ -428,10 +502,28 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error
return err
}

log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink)

// Assign the bucket ID as the resource ID
d.Set("self_link", res.SelfLink)

if d.HasChange("retention_policy") {
if v, ok := d.GetOk("retention_policy"); ok {
retention_policies := v.([]interface{})

sb.RetentionPolicy = &storage.BucketRetentionPolicy{}

retentionPolicy := retention_policies[0].(map[string]interface{})

if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) {
err = lockRetentionPolicy(config.clientStorage.Buckets, d.Get("name").(string), res.Metageneration)
if err != nil {
return err
}
}
}
}

log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink)

d.SetId(res.Id)

return nil
Expand Down Expand Up @@ -481,6 +573,7 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error {
d.Set("versioning", flattenBucketVersioning(res.Versioning))
d.Set("lifecycle_rule", flattenBucketLifecycle(res.Lifecycle))
d.Set("labels", res.Labels)
d.Set("retention_policy", flattenBucketRetentionPolicy(res.RetentionPolicy))
d.Set("website", flattenBucketWebsite(res.Website))

if res.IamConfiguration != nil && res.IamConfiguration.BucketPolicyOnly != nil {
Expand Down Expand Up @@ -513,6 +606,20 @@ func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error
}

if len(res.Items) != 0 {
if d.Get("retention_policy.0.is_locked").(bool) {
for _, item := range res.Items {
expiration, err := time.Parse(time.RFC3339, item.RetentionExpirationTime)
if err != nil {
return err
}
if expiration.After(time.Now()) {
deleteErr := errors.New("Bucket '" + d.Get("name").(string) + "' contains objects that have not met the retention period yet and cannot be deleted.")
log.Printf("Error! %s : %s\n\n", bucket, deleteErr)
return deleteErr
}
}
}

if d.Get("force_destroy").(bool) {
// GCS requires that a bucket be empty (have no objects or object
// versions) before it can be deleted.
Expand Down Expand Up @@ -686,6 +793,34 @@ func flattenBucketLogging(bucketLogging *storage.BucketLogging) []map[string]int
return loggings
}

// expandBucketRetentionPolicy converts the Terraform retention_policy
// configuration block into its GCS API representation.
//
// Returns nil when the configuration list is empty or its single element is
// unset, so callers can pass the result straight into the bucket resource
// without a pre-check. (The original indexed retentionPolicies[0]
// unconditionally, which panics on a degenerate diff value.)
func expandBucketRetentionPolicy(configured interface{}) *storage.BucketRetentionPolicy {
	retentionPolicies := configured.([]interface{})
	if len(retentionPolicies) == 0 || retentionPolicies[0] == nil {
		return nil
	}
	retentionPolicy := retentionPolicies[0].(map[string]interface{})

	return &storage.BucketRetentionPolicy{
		IsLocked:        retentionPolicy["is_locked"].(bool),
		RetentionPeriod: int64(retentionPolicy["retention_period"].(int)),
	}
}

// flattenBucketRetentionPolicy converts the GCS API retention policy into
// the zero-or-one element list stored in Terraform state. A nil policy
// flattens to an empty (but non-nil) list.
func flattenBucketRetentionPolicy(bucketRetentionPolicy *storage.BucketRetentionPolicy) []map[string]interface{} {
	if bucketRetentionPolicy == nil {
		return []map[string]interface{}{}
	}

	return []map[string]interface{}{
		{
			"is_locked":        bucketRetentionPolicy.IsLocked,
			"retention_period": bucketRetentionPolicy.RetentionPeriod,
		},
	}
}

func expandBucketVersioning(configured interface{}) *storage.BucketVersioning {
versionings := configured.([]interface{})
versioning := versionings[0].(map[string]interface{})
Expand Down Expand Up @@ -1028,3 +1163,12 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int {

return hashcode.String(buf.String())
}

// lockRetentionPolicy permanently locks the named bucket's retention policy
// via the GCS LockRetentionPolicy API. The metageneration must match the
// bucket's current value or the call fails. Locking is irreversible.
func lockRetentionPolicy(bucketsService *storage.BucketsService, bucketName string, metageneration int64) error {
	_, err := bucketsService.LockRetentionPolicy(bucketName, metageneration).Do()
	return err
}
124 changes: 124 additions & 0 deletions google/resource_storage_bucket_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"fmt"
"log"
"testing"
"time"

"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
Expand Down Expand Up @@ -836,6 +837,34 @@ func TestAccStorageBucket_labels(t *testing.T) {
})
}

// TestAccStorageBucket_retentionPolicy creates a bucket with an unlocked
// retention policy, verifies the policy actually blocks early object
// deletion, and checks that the resource imports cleanly.
func TestAccStorageBucket_retentionPolicy(t *testing.T) {
	t.Parallel()

	var bucket storage.Bucket
	bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt())

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccStorageBucketDestroy,
		Steps: []resource.TestStep{
			{
				// Apply a config with retention_period = 10 and confirm the
				// bucket exists and enforces the retention window.
				Config: testAccStorageBucket_retentionPolicy(bucketName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckStorageBucketExists(
						"google_storage_bucket.bucket", bucketName, &bucket),
					testAccCheckStorageBucketRetentionPolicy(bucketName),
				),
			},
			{
				// Verify the retention_policy attributes round-trip on import.
				ResourceName:      "google_storage_bucket.bucket",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

func TestAccStorageBucket_website(t *testing.T) {
t.Parallel()

Expand All @@ -858,6 +887,43 @@ func TestAccStorageBucket_website(t *testing.T) {
})
}

// TestAccStorageBucket_retentionPolicyLocked creates a bucket with a locked
// retention policy, verifies enforcement and import, then applies an
// unlocked-policy config and checks that Terraform recreated the bucket
// (a locked policy cannot be unlocked in place, so CustomizeDiff forces new).
func TestAccStorageBucket_retentionPolicyLocked(t *testing.T) {
	t.Parallel()

	var bucket storage.Bucket
	var newBucket storage.Bucket
	bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt())

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccStorageBucketDestroy,
		Steps: []resource.TestStep{
			{
				// Locked policy: bucket exists and enforces retention.
				Config: testAccStorageBucket_lockedRetentionPolicy(bucketName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckStorageBucketExists(
						"google_storage_bucket.bucket", bucketName, &bucket),
					testAccCheckStorageBucketRetentionPolicy(bucketName),
				),
			},
			{
				ResourceName:      "google_storage_bucket.bucket",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				// Switching is_locked true -> false must recreate the bucket;
				// WasRecreated compares the two captured bucket metadata.
				Config: testAccStorageBucket_retentionPolicy(bucketName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckStorageBucketExists(
						"google_storage_bucket.bucket", bucketName, &newBucket),
					testAccCheckStorageBucketWasRecreated(&newBucket, &bucket),
				),
			},
		},
	})
}

func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
Expand Down Expand Up @@ -926,6 +992,39 @@ func testAccCheckStorageBucketPutItem(bucketName string) resource.TestCheckFunc
}
}

// testAccCheckStorageBucketRetentionPolicy verifies that the bucket's
// retention policy is enforced: an object inserted into the bucket cannot be
// deleted until the 10 second retention period (set by the test configs)
// has elapsed, after which deletion must succeed.
func testAccCheckStorageBucketRetentionPolicy(bucketName string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := testAccProvider.Meta().(*Config)

		data := bytes.NewBufferString("test")
		dataReader := bytes.NewReader(data.Bytes())
		object := &storage.Object{Name: "bucketDestroyTestFile"}

		// This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails
		if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil {
			log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink)
		} else {
			return fmt.Errorf("Objects.Insert failed: %v", err)
		}

		// Test deleting immediately, this should fail because of the 10 second
		// retention. (Fix: the original referenced an undefined identifier
		// `objectName` here, which does not compile.)
		if err := config.clientStorage.Objects.Delete(bucketName, object.Name).Do(); err == nil {
			return fmt.Errorf("Objects.Delete succeeded: %v", object.Name)
		}

		// Wait out the retention period, then the delete must succeed.
		time.Sleep(10 * time.Second)

		if err := config.clientStorage.Objects.Delete(bucketName, object.Name).Do(); err == nil {
			log.Printf("[INFO] Deleted object %v at location %v\n\n", object.Name, object.SelfLink)
		} else {
			return fmt.Errorf("Objects.Delete failed: %v", err)
		}

		return nil
	}
}

func testAccCheckStorageBucketMissing(bucketName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
Expand Down Expand Up @@ -1363,6 +1462,31 @@ resource "google_storage_bucket" "bucket" {
`, bucketName)
}

// testAccStorageBucket_retentionPolicy returns a bucket config with an
// unlocked retention policy of 10 seconds.
func testAccStorageBucket_retentionPolicy(bucketName string) string {
	return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
	name = "%s"
	retention_policy {
		retention_period = 10
	}
}
`, bucketName)
}

// testAccStorageBucket_lockedRetentionPolicy returns a bucket config with a
// locked retention policy of 10 seconds. Locking is irreversible, so
// switching away from this config forces bucket recreation.
func testAccStorageBucket_lockedRetentionPolicy(bucketName string) string {
	return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
	name = "%s"
	retention_policy {
		is_locked = true
		retention_period = 10
	}
}
`, bucketName)
}

func testAccStorageBucket_website(bucketName string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "website" {
Expand Down
Loading

0 comments on commit e20a35d

Please sign in to comment.