diff --git a/.changelog/.txt b/.changelog/.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/google/data_source_google_compute_address.go b/google/data_source_google_compute_address.go index b57c0fe40cc..f3b21f366d7 100644 --- a/google/data_source_google_compute_address.go +++ b/google/data_source_google_compute_address.go @@ -3,212 +3,10 @@ package google import ( - "fmt" - "regexp" - "strings" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "github.com/hashicorp/terraform-provider-google/google/services/compute" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -var ( - computeAddressIdTemplate = "projects/%s/regions/%s/addresses/%s" - computeAddressLinkRegex = regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$") -) - -func DataSourceGoogleComputeAddress() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeAddressRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "address": { - Type: schema.TypeString, - Computed: true, - }, - - "address_type": { - Type: schema.TypeString, - Computed: true, - }, - - "network": { - Type: schema.TypeString, - Computed: true, - }, - - "network_tier": { - Type: schema.TypeString, - Computed: true, - }, - - "prefix_length": { - Type: schema.TypeInt, - Computed: true, - }, - - "purpose": { - Type: schema.TypeString, - Computed: true, - }, - - "subnetwork": { - Type: schema.TypeString, - Computed: true, - }, - - "users": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { - Type: schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - - address, err := config.NewComputeClient(userAgent).Addresses.Get(project, region, name).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Address Not Found : %s", name)) - } - - if err := d.Set("address", address.Address); err != nil { - return fmt.Errorf("Error setting address: %s", err) - } - if err := d.Set("address_type", address.AddressType); err != nil { - return fmt.Errorf("Error setting address_type: %s", err) - } - if err := d.Set("network", address.Network); err != nil { - return fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("network_tier", address.NetworkTier); err != nil { - return fmt.Errorf("Error setting network_tier: %s", err) - } - if err := d.Set("prefix_length", address.PrefixLength); err != nil { - return fmt.Errorf("Error setting prefix_length: %s", err) - } - if err := d.Set("purpose", address.Purpose); err != nil { - return fmt.Errorf("Error setting purpose: %s", err) - } - if err := d.Set("subnetwork", address.Subnetwork); err != nil { - return fmt.Errorf("Error setting subnetwork: %s", err) - } - if err := 
d.Set("status", address.Status); err != nil { - return fmt.Errorf("Error setting status: %s", err) - } - if err := d.Set("self_link", address.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name)) - return nil -} - -type computeAddressId struct { - Project string - Region string - Name string -} - -func (s computeAddressId) canonicalId() string { - return fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) -} - -func parseComputeAddressId(id string, config *transport_tpg.Config) (*computeAddressId, error) { - var parts []string - if computeAddressLinkRegex.MatchString(id) { - parts = computeAddressLinkRegex.FindStringSubmatch(id) - - return &computeAddressId{ - Project: parts[1], - Region: parts[2], - Name: parts[3], - }, nil - } else { - parts = strings.Split(id, "/") - } - - if len(parts) == 3 { - return &computeAddressId{ - Project: parts[0], - Region: parts[1], - Name: parts[2], - }, nil - } else if len(parts) == 2 { - // Project is optional. - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") - } - - return &computeAddressId{ - Project: config.Project, - Region: parts[0], - Name: parts[1], - }, nil - } else if len(parts) == 1 { - // Project and region is optional - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") - } - if config.Region == "" { - return nil, fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") - } - - return &computeAddressId{ - Project: config.Project, - Region: config.Region, - Name: parts[0], - }, nil - } - - return nil, fmt.Errorf("Invalid compute address id. 
Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") +func parseComputeAddressId(id string, config *transport_tpg.Config) (*compute.ComputeAddressId, error) { + return compute.ParseComputeAddressId(id, config) } diff --git a/google/data_source_google_compute_address_test.go b/google/data_source_google_compute_address_test.go index a356371ed57..d12ca4974b9 100644 --- a/google/data_source_google_compute_address_test.go +++ b/google/data_source_google_compute_address_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/hashicorp/terraform-provider-google/google/services/compute" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" @@ -56,7 +57,7 @@ func TestComputeAddressIdParsing(t *testing.T) { } for tn, tc := range cases { - addressId, err := parseComputeAddressId(tc.ImportId, tc.Config) + addressId, err := compute.ParseComputeAddressId(tc.ImportId, tc.Config) if tc.ExpectedError && err == nil { t.Fatalf("bad: %s, expected an error", tn) @@ -69,8 +70,8 @@ func TestComputeAddressIdParsing(t *testing.T) { t.Fatalf("bad: %s, err: %#v", tn, err) } - if addressId.canonicalId() != tc.ExpectedCanonicalId { - t.Fatalf("bad: %s, expected canonical id to be `%s` but is `%s`", tn, tc.ExpectedCanonicalId, addressId.canonicalId()) + if addressId.CanonicalId() != tc.ExpectedCanonicalId { + t.Fatalf("bad: %s, expected canonical id to be `%s` but is `%s`", tn, tc.ExpectedCanonicalId, addressId.CanonicalId()) } } } @@ -156,7 +157,7 @@ func testAccCheckDataSourceComputeAddressDestroy(t *testing.T, name string) reso config := GoogleProviderConfig(t) - addressId, err := parseComputeAddressId(rs.Primary.ID, nil) + addressId, err := compute.ParseComputeAddressId(rs.Primary.ID, nil) if err != nil { return err } diff --git a/google/iam_bigquery_dataset.go b/google/iam_bigquery_dataset.go index 0aa12c5d135..ad496245e53 100644 --- a/google/iam_bigquery_dataset.go +++ b/google/iam_bigquery_dataset.go @@ -3,287 +3,20 @@ package google import ( - "errors" - "fmt" - "strings" - + "github.com/hashicorp/terraform-provider-google/google/services/bigquery" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" ) -var IamBigqueryDatasetSchema = map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -var bigqueryAccessPrimitiveToRoleMap = map[string]string{ - "OWNER": "roles/bigquery.dataOwner", - "WRITER": "roles/bigquery.dataEditor", - "READER": "roles/bigquery.dataViewer", -} - -type BigqueryDatasetIamUpdater struct { - project string - datasetId string - d tpgresource.TerraformResourceData - Config *transport_tpg.Config -} +var IamBigqueryDatasetSchema = bigquery.IamBigqueryDatasetSchema func NewBigqueryDatasetIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error 
setting project: %s", err) - } - - return &BigqueryDatasetIamUpdater{ - project: project, - datasetId: d.Get("dataset_id").(string), - d: d, - Config: config, - }, nil + return bigquery.NewBigqueryDatasetIamUpdater(d, config) } func BigqueryDatasetIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { - fv, err := tpgresource.ParseProjectFieldValue("datasets", d.Id(), "project", d, config, false) - if err != nil { - return err - } - - if err := d.Set("project", fv.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("dataset_id", fv.Name); err != nil { - return fmt.Errorf("Error setting dataset_id: %s", err) - } - - // Explicitly set the id so imported resources have the same ID format as non-imported ones. - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *BigqueryDatasetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: u.Config, - Method: "GET", - Project: u.project, - RawURL: url, - UserAgent: userAgent, - }) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - policy, err := accessToPolicy(res["access"]) - if err != nil { - return nil, err - } - return policy, nil -} - -func (u *BigqueryDatasetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) - - access, err := policyToAccess(policy) - if err != nil { - return err - } - obj := map[string]interface{}{ - "access": access, - } - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: u.Config, - Method: "PATCH", - Project: u.project, - RawURL: url, - UserAgent: userAgent, - Body: obj, - }) - if err != nil { - return fmt.Errorf("Error creating DatasetAccess: %s", err) - } - - return nil -} - -func accessToPolicy(access interface{}) (*cloudresourcemanager.Policy, error) { - if access == nil { - return nil, nil - } - roleToBinding := make(map[string]*cloudresourcemanager.Binding) - - accessArr := access.([]interface{}) - for _, v := range accessArr { - memberRole := v.(map[string]interface{}) - rawRole, ok := memberRole["role"] - if !ok { - // "view" allows role to not be defined. It is a special dataset access construct, so ignore - // If a user wants to manage "view" access they should use the `bigquery_dataset_access` resource - continue - } - role := rawRole.(string) - if iamRole, ok := bigqueryAccessPrimitiveToRoleMap[role]; ok { - // API changes certain IAM roles to legacy roles. 
Revert these changes - role = iamRole - } - member, err := accessToIamMember(memberRole) - if err != nil { - return nil, err - } - // We have to combine bindings manually - binding, ok := roleToBinding[role] - if !ok { - binding = &cloudresourcemanager.Binding{Role: role, Members: []string{}} - } - binding.Members = append(binding.Members, member) - - roleToBinding[role] = binding - } - bindings := make([]*cloudresourcemanager.Binding, 0) - for _, v := range roleToBinding { - bindings = append(bindings, v) - } - - return &cloudresourcemanager.Policy{Bindings: bindings}, nil -} - -func policyToAccess(policy *cloudresourcemanager.Policy) ([]map[string]interface{}, error) { - res := make([]map[string]interface{}, 0) - if len(policy.AuditConfigs) != 0 { - return nil, errors.New("Access policies not allowed on BigQuery Dataset IAM policies") - } - for _, binding := range policy.Bindings { - if binding.Condition != nil { - return nil, errors.New("IAM conditions not allowed on BigQuery Dataset IAM") - } - if fullRole, ok := bigqueryAccessPrimitiveToRoleMap[binding.Role]; ok { - return nil, fmt.Errorf("BigQuery Dataset legacy role %s is not allowed when using google_bigquery_dataset_iam resources. Please use the full form: %s", binding.Role, fullRole) - } - for _, member := range binding.Members { - // Do not append any deleted members - if strings.HasPrefix(member, "deleted:") { - continue - } - access := map[string]interface{}{ - "role": binding.Role, - } - memberType, member, err := iamMemberToAccess(member) - if err != nil { - return nil, err - } - access[memberType] = member - res = append(res, access) - } - } - - return res, nil -} - -// Returns the member access type and member for an IAM member. -// Dataset access uses different member types to identify groups, domains, etc. -// these types are used as keys in the access JSON payload -func iamMemberToAccess(member string) (string, string, error) { - if strings.HasPrefix(member, "deleted:") { - return "", "", fmt.Errorf("BigQuery Dataset IAM member is deleted: %s", member) - } - - pieces := strings.SplitN(member, ":", 2) - if len(pieces) > 1 { - switch pieces[0] { - case "group": - return "groupByEmail", pieces[1], nil - case "domain": - return "domain", pieces[1], nil - case "user": - return "userByEmail", pieces[1], nil - case "serviceAccount": - return "userByEmail", pieces[1], nil - default: - return "", "", fmt.Errorf("Failed to parse BigQuery Dataset IAM member type: %s", member) - } - } - if member == "projectOwners" || member == "projectReaders" || member == "projectWriters" || member == "allAuthenticatedUsers" { - // These are special BigQuery Dataset permissions - return "specialGroup", member, nil - } - return "iamMember", member, nil -} - -func accessToIamMember(access map[string]interface{}) (string, error) { - // One of the fields must be set, we have to find which IAM member type this maps to - if member, ok := access["groupByEmail"]; ok { - return fmt.Sprintf("group:%s", member.(string)), nil - } - if member, ok := access["domain"]; ok { - return fmt.Sprintf("domain:%s", member.(string)), nil - } - if member, ok := access["specialGroup"]; ok { - return member.(string), nil - } - if member, ok := access["iamMember"]; ok { - return member.(string), nil - } - if _, ok := access["view"]; ok { - // view does not map to an IAM member, use access instead - return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. 
To use views with a dataset, please use dataset_access") - } - if _, ok := access["dataset"]; ok { - // dataset does not map to an IAM member, use access instead - return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use views with a dataset, please use dataset_access") - } - if _, ok := access["routine"]; ok { - // dataset does not map to an IAM member, use access instead - return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use views with a dataset, please use dataset_access") - } - if member, ok := access["userByEmail"]; ok { - // service accounts have "gservice" in their email. This is best guess due to lost information - if strings.Contains(member.(string), "gserviceaccount") { - return fmt.Sprintf("serviceAccount:%s", member.(string)), nil - } - return fmt.Sprintf("user:%s", member.(string)), nil - } - return "", fmt.Errorf("Failed to identify IAM member from BigQuery Dataset access: %v", access) -} - -func (u *BigqueryDatasetIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/datasets/%s", u.project, u.datasetId) -} - -// Matches the mutex of google_big_query_dataset_access -func (u *BigqueryDatasetIamUpdater) GetMutexKey() string { - return fmt.Sprintf("%s", u.datasetId) -} - -func (u *BigqueryDatasetIamUpdater) DescribeResource() string { - return fmt.Sprintf("Bigquery Dataset %s/%s", u.project, u.datasetId) + return bigquery.BigqueryDatasetIdParseFunc(d, config) } diff --git a/google/iam_bigtable_instance.go b/google/iam_bigtable_instance.go index 5a9c5d49725..9edd03ce6b3 100644 --- a/google/iam_bigtable_instance.go +++ b/google/iam_bigtable_instance.go @@ -3,143 +3,20 @@ package google import ( - "fmt" - + "github.com/hashicorp/terraform-provider-google/google/services/bigtable" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/bigtableadmin/v2" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" ) -var IamBigtableInstanceSchema = map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type BigtableInstanceIamUpdater struct { - project string - instance string - d tpgresource.TerraformResourceData - Config *transport_tpg.Config -} +var IamBigtableInstanceSchema = bigtable.IamBigtableInstanceSchema func NewBigtableInstanceUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - - return &BigtableInstanceIamUpdater{ - project: project, - instance: d.Get("instance").(string), - d: d, - Config: config, - }, nil + return bigtable.NewBigtableInstanceUpdater(d, config) } func BigtableInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { - fv, err := tpgresource.ParseProjectFieldValue("instances", d.Id(), "project", d, config, false) - if err != nil { - return err - } - - if err := d.Set("project", fv.Project); err != nil { - return fmt.Errorf("Error setting project: %s", 
err) - } - if err := d.Set("instance", fv.Name); err != nil { - return fmt.Errorf("Error setting instance: %s", err) - } - - // Explicitly set the id so imported resources have the same ID format as non-imported ones. - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *BigtableInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - req := &bigtableadmin.GetIamPolicyRequest{} - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewBigTableProjectsInstancesClient(userAgent).GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := bigtableToResourceManagerPolicy(p) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *BigtableInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - bigtablePolicy, err := resourceManagerToBigtablePolicy(policy) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - req := &bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewBigTableProjectsInstancesClient(userAgent).SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigtableInstanceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/instances/%s", u.project, u.instance) -} - -func (u *BigtableInstanceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-bigtable-instance-%s-%s", u.project, u.instance) -} - -func (u *BigtableInstanceIamUpdater) DescribeResource() string { - return fmt.Sprintf("Bigtable Instance %s/%s", u.project, u.instance) -} - -func resourceManagerToBigtablePolicy(p *cloudresourcemanager.Policy) (*bigtableadmin.Policy, error) { - out := &bigtableadmin.Policy{} - err := tpgresource.Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a bigtable policy to a cloudresourcemanager policy: {{err}}", err) - } - return out, nil -} - -func bigtableToResourceManagerPolicy(p *bigtableadmin.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := tpgresource.Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a bigtable policy: {{err}}", err) - } - return out, nil + return bigtable.BigtableInstanceIdParseFunc(d, config) } diff --git a/google/iam_bigtable_table.go b/google/iam_bigtable_table.go index 4ce9b497d81..51cfd20a562 100644 --- a/google/iam_bigtable_table.go +++ b/google/iam_bigtable_table.go @@ -3,145 +3,20 @@ package google import ( - "fmt" - + "github.com/hashicorp/terraform-provider-google/google/services/bigtable" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/bigtableadmin/v2" - "github.com/hashicorp/errwrap" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" ) -var IamBigtableTableSchema = map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "table": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type BigtableTableIamUpdater struct { - project string - instance string - table string - d tpgresource.TerraformResourceData - Config *transport_tpg.Config -} +var IamBigtableTableSchema = bigtable.IamBigtableTableSchema func NewBigtableTableUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - - return &BigtableTableIamUpdater{ - project: project, - instance: d.Get("instance").(string), - table: d.Get("table").(string), - d: d, - Config: config, - }, nil + return bigtable.NewBigtableTableUpdater(d, config) } func BigtableTableIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { - values := make(map[string]string) - - m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - project, _ := tpgresource.GetProject(d, config) - - for k, v := range m { - values[k] = v - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - - if err := d.Set("instance", values["instance"]); err != nil { - return fmt.Errorf("Error setting instance: %s", err) - } - - if err := d.Set("table", values["table"]); err != nil { - return fmt.Errorf("Error setting table: %s", err) - } - - // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
- d.SetId(fmt.Sprintf("projects/%s/instances/%s/tables/%s", project, values["instance"], values["table"])) - return nil -} - -func (u *BigtableTableIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - req := &bigtableadmin.GetIamPolicyRequest{} - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewBigTableProjectsInstancesTablesClient(userAgent).GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := bigtableToResourceManagerPolicy(p) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *BigtableTableIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - bigtablePolicy, err := resourceManagerToBigtablePolicy(policy) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - req := &bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewBigTableProjectsInstancesTablesClient(userAgent).SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigtableTableIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/instances/%s/tables/%s", u.project, u.instance, u.table) -} - -func (u *BigtableTableIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-bigtable-instance-%s-%s-%s", u.project, u.instance, u.table) -} - -func (u *BigtableTableIamUpdater) DescribeResource() string { - return fmt.Sprintf("Bigtable Table %s/%s-%s", u.project, u.instance, u.table) + return bigtable.BigtableTableIdParseFunc(d, config) } diff --git a/google/provider.go b/google/provider.go index 9149723f39a..20f9225faaa 100644 --- a/google/provider.go +++ b/google/provider.go @@ -694,19 +694,19 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { return mergeResourceMaps(map[string]*schema.Resource{ // ####### START handwritten datasources ########### // ####### START datasources ########### - "google_access_approval_folder_service_account": DataSourceAccessApprovalFolderServiceAccount(), - "google_access_approval_organization_service_account": DataSourceAccessApprovalOrganizationServiceAccount(), - "google_access_approval_project_service_account": DataSourceAccessApprovalProjectServiceAccount(), + "google_access_approval_folder_service_account": accessapproval.DataSourceAccessApprovalFolderServiceAccount(), + "google_access_approval_organization_service_account": accessapproval.DataSourceAccessApprovalOrganizationServiceAccount(), + "google_access_approval_project_service_account": accessapproval.DataSourceAccessApprovalProjectServiceAccount(), "google_active_folder": DataSourceGoogleActiveFolder(), - "google_alloydb_locations": DataSourceAlloydbLocations(), - "google_alloydb_supported_database_flags": DataSourceAlloydbSupportedDatabaseFlags(), + "google_alloydb_locations": alloydb.DataSourceAlloydbLocations(), + "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), 
"google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), - "google_app_engine_default_service_account": DataSourceGoogleAppEngineDefaultServiceAccount(), + "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), "google_beyondcorp_app_connection": beyondcorp.DataSourceGoogleBeyondcorpAppConnection(), "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), "google_billing_account": DataSourceGoogleBillingAccount(), - "google_bigquery_default_service_account": DataSourceGoogleBigqueryDefaultServiceAccount(), + "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), "google_cloudfunctions_function": DataSourceGoogleCloudFunctionsFunction(), "google_cloudfunctions2_function": cloudfunctions2.DataSourceGoogleCloudFunctions2Function(), @@ -716,29 +716,29 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_cloud_run_service": cloudrun.DataSourceGoogleCloudRunService(), "google_composer_environment": DataSourceGoogleComposerEnvironment(), "google_composer_image_versions": DataSourceGoogleComposerImageVersions(), - "google_compute_address": DataSourceGoogleComputeAddress(), - "google_compute_addresses": DataSourceGoogleComputeAddresses(), + "google_compute_address": compute.DataSourceGoogleComputeAddress(), + "google_compute_addresses": compute.DataSourceGoogleComputeAddresses(), "google_compute_backend_service": compute.DataSourceGoogleComputeBackendService(), "google_compute_backend_bucket": compute.DataSourceGoogleComputeBackendBucket(), - "google_compute_default_service_account": DataSourceGoogleComputeDefaultServiceAccount(), + "google_compute_default_service_account": compute.DataSourceGoogleComputeDefaultServiceAccount(), "google_compute_disk": compute.DataSourceGoogleComputeDisk(), "google_compute_forwarding_rule": compute.DataSourceGoogleComputeForwardingRule(), - "google_compute_global_address": DataSourceGoogleComputeGlobalAddress(), + "google_compute_global_address": compute.DataSourceGoogleComputeGlobalAddress(), "google_compute_global_forwarding_rule": compute.DataSourceGoogleComputeGlobalForwardingRule(), "google_compute_ha_vpn_gateway": compute.DataSourceGoogleComputeHaVpnGateway(), "google_compute_health_check": compute.DataSourceGoogleComputeHealthCheck(), - "google_compute_image": DataSourceGoogleComputeImage(), + "google_compute_image": compute.DataSourceGoogleComputeImage(), "google_compute_instance": compute.DataSourceGoogleComputeInstance(), "google_compute_instance_group": compute.DataSourceGoogleComputeInstanceGroup(), "google_compute_instance_group_manager": compute.DataSourceGoogleComputeInstanceGroupManager(), - "google_compute_instance_serial_port": DataSourceGoogleComputeInstanceSerialPort(), + "google_compute_instance_serial_port": compute.DataSourceGoogleComputeInstanceSerialPort(), "google_compute_instance_template": compute.DataSourceGoogleComputeInstanceTemplate(), - "google_compute_lb_ip_ranges": DataSourceGoogleComputeLbIpRanges(), - "google_compute_network": DataSourceGoogleComputeNetwork(), + "google_compute_lb_ip_ranges": compute.DataSourceGoogleComputeLbIpRanges(), + "google_compute_network": compute.DataSourceGoogleComputeNetwork(), "google_compute_network_endpoint_group": 
compute.DataSourceGoogleComputeNetworkEndpointGroup(), - "google_compute_network_peering": DataSourceComputeNetworkPeering(), - "google_compute_node_types": DataSourceGoogleComputeNodeTypes(), - "google_compute_regions": DataSourceGoogleComputeRegions(), + "google_compute_network_peering": compute.DataSourceComputeNetworkPeering(), + "google_compute_node_types": compute.DataSourceGoogleComputeNodeTypes(), + "google_compute_regions": compute.DataSourceGoogleComputeRegions(), "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), "google_compute_region_instance_group": compute.DataSourceGoogleComputeRegionInstanceGroup(), "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), @@ -749,9 +749,9 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), "google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), - "google_compute_subnetwork": DataSourceGoogleComputeSubnetwork(), - "google_compute_vpn_gateway": DataSourceGoogleComputeVpnGateway(), - "google_compute_zones": DataSourceGoogleComputeZones(), + "google_compute_subnetwork": compute.DataSourceGoogleComputeSubnetwork(), + "google_compute_vpn_gateway": compute.DataSourceGoogleComputeVpnGateway(), + "google_compute_zones": compute.DataSourceGoogleComputeZones(), "google_container_azure_versions": DataSourceGoogleContainerAzureVersions(), "google_container_aws_versions": DataSourceGoogleContainerAwsVersions(), "google_container_attached_versions": DataSourceGoogleContainerAttachedVersions(), @@ -890,9 +890,9 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { }, map[string]*schema.Resource{ // ####### START non-generated IAM datasources ########### - "google_bigtable_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(IamBigtableInstanceSchema, NewBigtableInstanceUpdater), - "google_bigtable_table_iam_policy": tpgiamresource.DataSourceIamPolicy(IamBigtableTableSchema, NewBigtableTableUpdater), - "google_bigquery_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater), + "google_bigtable_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater), + "google_bigtable_table_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater), + "google_bigquery_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater), "google_billing_account_iam_policy": tpgiamresource.DataSourceIamPolicy(IamBillingAccountSchema, NewBillingAccountIamUpdater), "google_dataproc_cluster_iam_policy": tpgiamresource.DataSourceIamPolicy(IamDataprocClusterSchema, NewDataprocClusterUpdater), "google_dataproc_job_iam_policy": tpgiamresource.DataSourceIamPolicy(IamDataprocJobSchema, NewDataprocJobUpdater), @@ -1394,35 +1394,35 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { }, map[string]*schema.Resource{ // ####### START handwritten resources ########### - "google_app_engine_application": ResourceAppEngineApplication(), - "google_apigee_sharedflow": ResourceApigeeSharedFlow(), - "google_apigee_sharedflow_deployment": ResourceApigeeSharedFlowDeployment(), - "google_apigee_flowhook": ResourceApigeeFlowhook(), - 
"google_apigee_keystores_aliases_pkcs12": ResourceApigeeKeystoresAliasesPkcs12(), - "google_apigee_keystores_aliases_key_cert_file": ResourceApigeeKeystoresAliasesKeyCertFile(), - "google_bigquery_table": ResourceBigQueryTable(), - "google_bigtable_gc_policy": ResourceBigtableGCPolicy(), - "google_bigtable_instance": ResourceBigtableInstance(), - "google_bigtable_table": ResourceBigtableTable(), + "google_app_engine_application": appengine.ResourceAppEngineApplication(), + "google_apigee_sharedflow": apigee.ResourceApigeeSharedFlow(), + "google_apigee_sharedflow_deployment": apigee.ResourceApigeeSharedFlowDeployment(), + "google_apigee_flowhook": apigee.ResourceApigeeFlowhook(), + "google_apigee_keystores_aliases_pkcs12": apigee.ResourceApigeeKeystoresAliasesPkcs12(), + "google_apigee_keystores_aliases_key_cert_file": apigee.ResourceApigeeKeystoresAliasesKeyCertFile(), + "google_bigquery_table": bigquery.ResourceBigQueryTable(), + "google_bigtable_gc_policy": bigtable.ResourceBigtableGCPolicy(), + "google_bigtable_instance": bigtable.ResourceBigtableInstance(), + "google_bigtable_table": bigtable.ResourceBigtableTable(), "google_billing_subaccount": ResourceBillingSubaccount(), "google_cloudfunctions_function": ResourceCloudFunctionsFunction(), "google_composer_environment": ResourceComposerEnvironment(), - "google_compute_attached_disk": ResourceComputeAttachedDisk(), + "google_compute_attached_disk": compute.ResourceComputeAttachedDisk(), "google_compute_instance": compute.ResourceComputeInstance(), "google_compute_instance_from_template": compute.ResourceComputeInstanceFromTemplate(), "google_compute_instance_group": compute.ResourceComputeInstanceGroup(), "google_compute_instance_group_manager": compute.ResourceComputeInstanceGroupManager(), "google_compute_instance_template": compute.ResourceComputeInstanceTemplate(), - "google_compute_network_peering": ResourceComputeNetworkPeering(), - "google_compute_project_default_network_tier": ResourceComputeProjectDefaultNetworkTier(), + "google_compute_network_peering": compute.ResourceComputeNetworkPeering(), + "google_compute_project_default_network_tier": compute.ResourceComputeProjectDefaultNetworkTier(), "google_compute_project_metadata": compute.ResourceComputeProjectMetadata(), "google_compute_project_metadata_item": compute.ResourceComputeProjectMetadataItem(), "google_compute_region_instance_group_manager": compute.ResourceComputeRegionInstanceGroupManager(), "google_compute_router_interface": compute.ResourceComputeRouterInterface(), - "google_compute_security_policy": ResourceComputeSecurityPolicy(), - "google_compute_shared_vpc_host_project": ResourceComputeSharedVpcHostProject(), - "google_compute_shared_vpc_service_project": ResourceComputeSharedVpcServiceProject(), - "google_compute_target_pool": ResourceComputeTargetPool(), + "google_compute_security_policy": compute.ResourceComputeSecurityPolicy(), + "google_compute_shared_vpc_host_project": compute.ResourceComputeSharedVpcHostProject(), + "google_compute_shared_vpc_service_project": compute.ResourceComputeSharedVpcServiceProject(), + "google_compute_target_pool": compute.ResourceComputeTargetPool(), "google_container_cluster": ResourceContainerCluster(), "google_container_node_pool": ResourceContainerNodePool(), "google_container_registry": ResourceContainerRegistry(), @@ -1475,15 +1475,15 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { }, map[string]*schema.Resource{ // ####### START non-generated IAM resources ########### - 
"google_bigtable_instance_iam_binding": tpgiamresource.ResourceIamBinding(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_instance_iam_member": tpgiamresource.ResourceIamMember(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_instance_iam_policy": tpgiamresource.ResourceIamPolicy(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_table_iam_binding": tpgiamresource.ResourceIamBinding(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigtable_table_iam_member": tpgiamresource.ResourceIamMember(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigtable_table_iam_policy": tpgiamresource.ResourceIamPolicy(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigquery_dataset_iam_binding": tpgiamresource.ResourceIamBinding(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_bigquery_dataset_iam_member": tpgiamresource.ResourceIamMember(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_bigquery_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), + "google_bigtable_instance_iam_binding": tpgiamresource.ResourceIamBinding(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_instance_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_instance_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_table_iam_binding": tpgiamresource.ResourceIamBinding(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigtable_table_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigtable_table_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigquery_dataset_iam_binding": tpgiamresource.ResourceIamBinding(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_member": tpgiamresource.ResourceIamMember(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), "google_billing_account_iam_binding": tpgiamresource.ResourceIamBinding(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), "google_billing_account_iam_member": tpgiamresource.ResourceIamMember(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), "google_billing_account_iam_policy": tpgiamresource.ResourceIamPolicy(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), diff --git a/google/resource_bigquery_table_test.go 
b/google/resource_bigquery_table_test.go index 52e04c33d48..7055efdf546 100644 --- a/google/resource_bigquery_table_test.go +++ b/google/resource_bigquery_table_test.go @@ -3,7 +3,6 @@ package google import ( - "encoding/json" "fmt" "regexp" "strings" @@ -12,389 +11,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" ) -func TestBigQueryTableSchemaDiffSuppress(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "empty schema": { - Old: "null", - New: "[]", - ExpectDiffSuppress: true, - }, - "empty schema -> non-empty": { - Old: "null", - New: `[ - { - "name": "PageNo", - "type": "INTEGER" - } - ]`, - ExpectDiffSuppress: false, - }, - "no change": { - Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"finalKey\" : {} }]", - New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"finalKey\" : {} }]", - ExpectDiffSuppress: true, - }, - "remove key": { - Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"finalKey\" : {} }]", - New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"finalKey\" : {} }]", - ExpectDiffSuppress: false, - }, - "empty description -> default description (empty)": { - Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"description\": \"\" }]", - New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\" }]", - ExpectDiffSuppress: true, - }, - "empty description -> other description": { - Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"description\": \"\" }]", - New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"description\": \"somethingRandom\" }]", - ExpectDiffSuppress: false, - }, - "mode NULLABLE -> other mode": { - Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"mode\": \"NULLABLE\" }]", - New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"mode\": \"somethingRandom\" }]", - ExpectDiffSuppress: false, - }, - "mode NULLABLE -> default mode (also NULLABLE)": { - Old: `[ - { - "mode": "NULLABLE", - "name": "PageNo", - "type": "INTEGER" - } - ]`, - New: `[ - { - "name": "PageNo", - "type": "INTEGER" - } - ]`, - ExpectDiffSuppress: true, - }, - "mode & type uppercase -> lowercase": { - Old: `[ - { - "mode": "NULLABLE", - "name": "PageNo", - "type": "INTEGER" - } - ]`, - New: `[ - { - "mode": "nullable", - "name": "PageNo", - "type": "integer" - } - ]`, - ExpectDiffSuppress: true, - }, - "type INTEGER -> INT64": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"INTEGER\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"INT64\" }]", - ExpectDiffSuppress: true, - }, - "type INTEGER -> other": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"INTEGER\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"somethingRandom\" }]", - ExpectDiffSuppress: false, - }, - "type FLOAT -> FLOAT64": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"FLOAT\" }]", - New: "[{\"name\": \"someValue\", 
\"anotherKey\" : \"anotherValue\", \"type\": \"FLOAT64\" }]", - ExpectDiffSuppress: true, - }, - "type FLOAT -> other": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"FLOAT\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"somethingRandom\" }]", - ExpectDiffSuppress: false, - }, - "type BOOLEAN -> BOOL": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOL\" }]", - ExpectDiffSuppress: true, - }, - "type BOOLEAN -> other": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"somethingRandom\" }]", - ExpectDiffSuppress: false, - }, - // this is invalid but we need to make sure we don't cause a panic - // if users provide an invalid schema - "invalid - missing type for old": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", - ExpectDiffSuppress: false, - }, - // this is invalid but we need to make sure we don't cause a panic - // if users provide an invalid schema - "invalid - missing type for new": { - Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", - New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", - ExpectDiffSuppress: false, - }, - "reordering fields": { - Old: `[ - { - "name": "PageNo", - "type": "INTEGER" - }, - { - "name": "IngestTime", - "type": "TIMESTAMP" - } - ]`, - New: `[ - { - "name": "IngestTime", - "type": "TIMESTAMP" - }, - { - "name": "PageNo", - "type": "INTEGER" - } - ]`, - ExpectDiffSuppress: true, - }, - "reordering fields with value change": { - Old: `[ - { - "name": "PageNo", - "type": "INTEGER", - "description": "someVal" - }, - { - "name": "IngestTime", - "type": "TIMESTAMP" - } - ]`, - New: `[ - { - "name": "IngestTime", - "type": "TIMESTAMP" - }, - { - "name": "PageNo", - "type": "INTEGER", - "description": "otherVal" - } - ]`, - ExpectDiffSuppress: false, - }, - "nested field ordering changes": { - Old: `[ - { - "name": "someValue", - "type": "INTEGER", - "fields": [ - { - "name": "value1", - "type": "INTEGER", - "mode": "NULLABLE", - "description": "someVal" - }, - { - "name": "value2", - "type": "BOOLEAN", - "mode": "NULLABLE", - "description": "someVal" - } - ] - } - ]`, - New: `[ - { - "name": "someValue", - "type": "INTEGER", - "fields": [ - { - "name": "value2", - "type": "BOOLEAN", - "mode": "NULLABLE", - "description": "someVal" - }, - { - "name": "value1", - "type": "INTEGER", - "mode": "NULLABLE", - "description": "someVal" - } - ] - } - ]`, - ExpectDiffSuppress: true, - }, - "policyTags": { - Old: `[ - { - "mode": "NULLABLE", - "name": "providerphone", - "policyTags": { - "names": [ - "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - }, - "type":"STRING" - } - ]`, - New: `[ - { - "name": "providerphone", - "type": "STRING", - "policyTags": { - "names": ["projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678"] - } - } - ]`, - ExpectDiffSuppress: true, - }, - "multiple levels of reordering with policyTags set": { - Old: `[ - { - "mode": "NULLABLE", - "name": "providerphone", - "type":"STRING", - "policyTags": { - "names": [ - 
"projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - }, - "fields": [ - { - "name": "value1", - "type": "INTEGER", - "mode": "NULLABLE", - "description": "someVal", - "policyTags": { - "names": [ - "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - } - }, - { - "name": "value2", - "type": "BOOLEAN", - "mode": "NULLABLE", - "description": "someVal" - } - ] - }, - { - "name": "PageNo", - "type": "INTEGER" - }, - { - "name": "IngestTime", - "type": "TIMESTAMP", - "fields": [ - { - "name": "value3", - "type": "INTEGER", - "mode": "NULLABLE", - "description": "someVal", - "policyTags": { - "names": [ - "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - } - }, - { - "name": "value4", - "type": "BOOLEAN", - "mode": "NULLABLE", - "description": "someVal" - } - ] - } - ]`, - New: `[ - { - "name": "IngestTime", - "type": "TIMESTAMP", - "fields": [ - { - "name": "value4", - "type": "BOOLEAN", - "mode": "NULLABLE", - "description": "someVal" - }, - { - "name": "value3", - "type": "INTEGER", - "mode": "NULLABLE", - "description": "someVal", - "policyTags": { - "names": [ - "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - } - } - ] - }, - { - "mode": "NULLABLE", - "name": "providerphone", - "type":"STRING", - "policyTags": { - "names": [ - "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - }, - "fields": [ - { - "name": "value1", - "type": "INTEGER", - "mode": "NULLABLE", - "description": "someVal", - "policyTags": { - "names": [ - "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" - ] - } - }, - { - "name": "value2", - "type": "BOOLEAN", - "mode": "NULLABLE", - "description": "someVal" - } - ] - }, - { - "name": "PageNo", - "type": "INTEGER" - } - ]`, - ExpectDiffSuppress: true, - }, - } - - for tn, tc := range cases { - tn := tn - tc := tc - t.Run(tn, func(t *testing.T) { - t.Parallel() - - var a, b interface{} - if err := json.Unmarshal([]byte(tc.Old), &a); err != nil { - t.Fatalf(fmt.Sprintf("unable to unmarshal old json - %v", err)) - } - if err := json.Unmarshal([]byte(tc.New), &b); err != nil { - t.Fatalf(fmt.Sprintf("unable to unmarshal new json - %v", err)) - } - if bigQueryTableSchemaDiffSuppress("schema", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - }) - } -} - func TestAccBigQueryTable_Basic(t *testing.T) { t.Parallel() @@ -1220,180 +838,6 @@ func TestAccBigQueryTable_emptySchema(t *testing.T) { }) } -type testUnitBigQueryDataTableJSONChangeableTestCase struct { - name string - jsonOld string - jsonNew string - changeable bool -} - -func (testcase *testUnitBigQueryDataTableJSONChangeableTestCase) check(t *testing.T) { - var old, new interface{} - if err := json.Unmarshal([]byte(testcase.jsonOld), &old); err != nil { - t.Fatalf("unable to unmarshal json - %v", err) - } - if err := json.Unmarshal([]byte(testcase.jsonNew), &new); err != nil { - t.Fatalf("unable to unmarshal json - %v", err) - } - changeable, err := resourceBigQueryTableSchemaIsChangeable(old, new) - if err != nil { - t.Errorf("%s failed unexpectedly: %s", testcase.name, err) - } - if changeable != testcase.changeable { - t.Errorf("expected changeable result of %v but got %v for testcase %s", testcase.changeable, changeable, testcase.name) - } - - d := &tpgresource.ResourceDiffMock{ - Before: map[string]interface{}{}, - After: 
map[string]interface{}{}, - } - - d.Before["schema"] = testcase.jsonOld - d.After["schema"] = testcase.jsonNew - - err = resourceBigQueryTableSchemaCustomizeDiffFunc(d) - if err != nil { - t.Errorf("error on testcase %s - %v", testcase.name, err) - } - if !testcase.changeable != d.IsForceNew { - t.Errorf("%s: expected d.IsForceNew to be %v, but was %v", testcase.name, !testcase.changeable, d.IsForceNew) - } -} - -var testUnitBigQueryDataTableIsChangableTestCases = []testUnitBigQueryDataTableJSONChangeableTestCase{ - { - name: "defaultEquality", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - changeable: true, - }, - { - name: "arraySizeIncreases", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"asomeValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - changeable: true, - }, - { - name: "arraySizeDecreases", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"asomeValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - changeable: false, - }, - { - name: "descriptionChanges", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", - changeable: true, - }, - { - name: "typeInteger", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INT64\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", - changeable: true, - }, - { - name: "typeFloat", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"FLOAT\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"FLOAT64\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", - changeable: true, - }, - { - name: "typeBool", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOL\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", - changeable: true, - }, - { - name: "typeChangeIncompatible", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"DATETIME\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", - changeable: false, - }, - // this is invalid but we need to make sure we don't cause a panic - // if users provide an invalid schema - { - name: "typeChangeIgnoreNewMissingType", - jsonOld: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", - jsonNew: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", - changeable: true, - }, - // this 
is invalid but we need to make sure we don't cause a panic - // if users provide an invalid schema - { - name: "typeChangeIgnoreOldMissingType", - jsonOld: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", - jsonNew: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", - changeable: true, - }, - { - name: "typeModeReqToNull", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", - changeable: true, - }, - { - name: "typeModeIncompatible", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REPEATED\", \"description\" : \"some new value\" }]", - changeable: false, - }, - { - name: "modeToDefaultNullable", - jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"description\" : \"some new value\" }]", - changeable: true, - }, - { - name: "orderOfArrayChangesAndDescriptionChanges", - jsonOld: "[{\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"value2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"value2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"newVal\" }, {\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - changeable: true, - }, - { - name: "orderOfArrayChangesAndNameChanges", - jsonOld: "[{\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"value2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - jsonNew: "[{\"name\": \"value3\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"newVal\" }, {\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", - changeable: false, - }, - { - name: "policyTags", - jsonOld: `[ - { - "mode": "NULLABLE", - "name": "providerphone", - "policyTags": { - "names": ["projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678"] - }, - "type":"STRING" - } - ]`, - jsonNew: `[ - { - "name": "providerphone", - "type": "STRING", - "policyTags": { - "names": ["projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678"] - } - } - ]`, - changeable: true, - }, -} - -func TestUnitBigQueryDataTable_schemaIsChangable(t *testing.T) { - t.Parallel() - for _, testcase := range testUnitBigQueryDataTableIsChangableTestCases { - testcase.check(t) - testcaseNested := &testUnitBigQueryDataTableJSONChangeableTestCase{ - testcase.name + "Nested", - fmt.Sprintf("[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"fields\" : %s }]", testcase.jsonOld), - fmt.Sprintf("[{\"name\": \"someValue\", \"type\" : \"INT64\", \"fields\" : %s }]", testcase.jsonNew), - testcase.changeable, - } - testcaseNested.check(t) - } -} - func testAccCheckBigQueryExtData(t *testing.T, expectedQuoteChar string) resource.TestCheckFunc { return func(s *terraform.State) error { for _, rs := range s.RootModule().Resources { diff --git a/google/resource_bigtable_gc_policy.go 
b/google/resource_bigtable_gc_policy.go index 433b9d73c9d..ca9722ece38 100644 --- a/google/resource_bigtable_gc_policy.go +++ b/google/resource_bigtable_gc_policy.go @@ -3,595 +3,11 @@ package google import ( - "context" - "encoding/json" - "fmt" - "log" - "strings" - "time" - "cloud.google.com/go/bigtable" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/terraform-provider-google/google/verify" -) - -const ( - GCPolicyModeIntersection = "INTERSECTION" - GCPolicyModeUnion = "UNION" + tpgbigtable "github.com/hashicorp/terraform-provider-google/google/services/bigtable" ) -func resourceBigtableGCPolicyCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { - count := diff.Get("max_age.#").(int) - if count < 1 { - return nil - } - - oldDays, newDays := diff.GetChange("max_age.0.days") - oldDuration, newDuration := diff.GetChange("max_age.0.duration") - log.Printf("days: %v %v", oldDays, newDays) - log.Printf("duration: %v %v", oldDuration, newDuration) - - if oldDuration == "" && newDuration != "" { - // flatten the old days and the new duration to duration... if they are - // equal then do nothing. - do, err := time.ParseDuration(newDuration.(string)) - if err != nil { - return err - } - dn := time.Hour * 24 * time.Duration(oldDays.(int)) - if do == dn { - err := diff.Clear("max_age.0.days") - if err != nil { - return err - } - err = diff.Clear("max_age.0.duration") - if err != nil { - return err - } - } - } - - return nil -} - -func resourceBigtableGCPolicyCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { - return resourceBigtableGCPolicyCustomizeDiffFunc(d) -} - -func ResourceBigtableGCPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceBigtableGCPolicyUpsert, - Read: resourceBigtableGCPolicyRead, - Delete: resourceBigtableGCPolicyDestroy, - Update: resourceBigtableGCPolicyUpsert, - CustomizeDiff: resourceBigtableGCPolicyCustomizeDiff, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareResourceNames, - Description: `The name of the Bigtable instance.`, - }, - - "table": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the table.`, - }, - - "column_family": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the column family.`, - }, - - "gc_rules": { - Type: schema.TypeString, - Optional: true, - Description: `Serialized JSON string for garbage collection policy. Conflicts with "mode", "max_age" and "max_version".`, - ValidateFunc: validation.StringIsJSON, - ConflictsWith: []string{"mode", "max_age", "max_version"}, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - }, - "mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `NOTE: 'gc_rules' is more flexible, and should be preferred over this field for new resources. This field may be deprecated in the future. 
If multiple policies are set, you should choose between UNION OR INTERSECTION.`, - ValidateFunc: validation.StringInSlice([]string{GCPolicyModeIntersection, GCPolicyModeUnion}, false), - ConflictsWith: []string{"gc_rules"}, - }, - - "max_age": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `NOTE: 'gc_rules' is more flexible, and should be preferred over this field for new resources. This field may be deprecated in the future. GC policy that applies to all cells older than the given age.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "days": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - Deprecated: "Deprecated in favor of duration", - Description: `Number of days before applying GC policy.`, - ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, - }, - "duration": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Duration before applying GC policy`, - ValidateFunc: verify.ValidateDuration(), - ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, - }, - }, - }, - ConflictsWith: []string{"gc_rules"}, - }, - - "max_version": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `NOTE: 'gc_rules' is more flexible, and should be preferred over this field for new resources. This field may be deprecated in the future. GC policy that applies to all versions of a cell except for the most recent.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "number": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Number of version before applying the GC policy.`, - }, - }, - }, - ConflictsWith: []string{"gc_rules"}, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "deletion_policy": { - Type: schema.TypeString, - Optional: true, - Description: `The deletion policy for the GC policy. Setting ABANDON allows the resource - to be abandoned rather than deleted. This is useful for GC policy as it cannot be deleted - in a replicated instance. Possible values are: "ABANDON".`, - ValidateFunc: validation.StringInSlice([]string{"ABANDON", ""}, false), - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - if err := d.Set("instance_name", instanceName); err != nil { - return fmt.Errorf("Error setting instance_name: %s", err) - } - - defer c.Close() - - gcPolicy, err := generateBigtableGCPolicy(d) - if err != nil { - return err - } - - tableName := d.Get("table").(string) - columnFamily := d.Get("column_family").(string) - - retryFunc := func() error { - reqErr := c.SetGCPolicy(ctx, tableName, columnFamily, gcPolicy) - return reqErr - } - // The default create timeout is 20 minutes. 
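// The max_age block accepts either the deprecated days field or duration, and
// the customize-diff above suppresses the plan change when both describe the
// same window (24h * days == duration), so migrating days = 3 to duration =
// "72h" is a no-op. A minimal stdlib sketch of that equivalence rule
// (standalone illustration, not provider code):

package main

import (
	"fmt"
	"time"
)

func main() {
	days := 3
	dur, err := time.ParseDuration("72h")
	if err != nil {
		panic(err)
	}
	// true: 3 days and "72h" describe the same retention window, so no diff.
	fmt.Println(time.Hour*24*time.Duration(days) == dur)
}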
- timeout := d.Timeout(schema.TimeoutCreate) - pollInterval := time.Duration(30) * time.Second - // Mutations to gc policies can only happen one-at-a-time and take some amount of time. - // Use a fixed polling rate of 30s based on the RetryInfo returned by the server rather than - // the standard up-to-10s exponential backoff for those operations. - err = transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: retryFunc, - Timeout: timeout, - PollInterval: pollInterval, - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigTableRetryableError}, - }) - if err != nil { - return err - } - - table, err := c.TableInfo(ctx, tableName) - if err != nil { - return fmt.Errorf("Error retrieving table. Could not find %s in %s. %s", tableName, instanceName, err) - } - - for _, i := range table.FamilyInfos { - if i.Name == columnFamily { - d.SetId(i.GCPolicy) - } - } - - return resourceBigtableGCPolicyRead(d, meta) -} - -func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - name := d.Get("table").(string) - columnFamily := d.Get("column_family").(string) - ti, err := c.TableInfo(ctx, name) - if err != nil { - if tpgresource.IsNotFoundGrpcError(err) { - log.Printf("[WARN] Removing the GC policy because the parent table %s is gone", name) - d.SetId("") - return nil - } - return err - } - - for _, fi := range ti.FamilyInfos { - if fi.Name != columnFamily { - continue - } - - d.SetId(fi.GCPolicy) - - // No GC Policy. - if fi.FullGCPolicy.String() == "" { - return nil - } - - // Only set `gc_rules`` when the legacy fields are not set. We are not planning to support legacy fields. - maxAge := d.Get("max_age") - maxVersion := d.Get("max_version") - if d.Get("mode") == "" && len(maxAge.([]interface{})) == 0 && len(maxVersion.([]interface{})) == 0 { - gcRuleString, err := gcPolicyToGCRuleString(fi.FullGCPolicy, true) - if err != nil { - return err - } - gcRuleJsonString, err := json.Marshal(gcRuleString) - if err != nil { - return fmt.Errorf("Error marshaling GC policy to json: %s", err) - } - d.Set("gc_rules", string(gcRuleJsonString)) - } - break - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - // Recursively convert Bigtable GC policy to JSON format in a map. func gcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]interface{}, error) { - result := make(map[string]interface{}) - switch bigtable.GetPolicyType(gc) { - case bigtable.PolicyMaxAge: - age := gc.(bigtable.MaxAgeGCPolicy).GetDurationString() - if topLevel { - rule := make(map[string]interface{}) - rule["max_age"] = age - rules := []interface{}{} - rules = append(rules, rule) - result["rules"] = rules - } else { - result["max_age"] = age - } - break - case bigtable.PolicyMaxVersion: - // bigtable.MaxVersionsGCPolicy is an int. - // Not sure why max_version is a float64. - // TODO: Maybe change max_version to an int. 
- version := float64(int(gc.(bigtable.MaxVersionsGCPolicy))) - if topLevel { - rule := make(map[string]interface{}) - rule["max_version"] = version - rules := []interface{}{} - rules = append(rules, rule) - result["rules"] = rules - } else { - result["max_version"] = version - } - break - case bigtable.PolicyUnion: - result["mode"] = "union" - rules := []interface{}{} - for _, c := range gc.(bigtable.UnionGCPolicy).Children { - gcRuleString, err := gcPolicyToGCRuleString(c, false) - if err != nil { - return nil, err - } - rules = append(rules, gcRuleString) - } - result["rules"] = rules - break - case bigtable.PolicyIntersection: - result["mode"] = "intersection" - rules := []interface{}{} - for _, c := range gc.(bigtable.IntersectionGCPolicy).Children { - gcRuleString, err := gcPolicyToGCRuleString(c, false) - if err != nil { - return nil, err - } - rules = append(rules, gcRuleString) - } - result["rules"] = rules - default: - break - } - - if err := validateNestedPolicy(result, topLevel); err != nil { - return nil, err - } - - return result, nil -} - -func resourceBigtableGCPolicyDestroy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - - if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { - // Allows for the GC policy to be abandoned without deletion to avoid possible - // deletion failure in a replicated instance. - log.Printf("[WARN] The GC policy is abandoned") - return nil - } - - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - retryFunc := func() error { - reqErr := c.SetGCPolicy(ctx, d.Get("table").(string), d.Get("column_family").(string), bigtable.NoGcPolicy()) - return reqErr - } - // The default delete timeout is 20 minutes. 
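// As with the create path above, the delete path polls SetGCPolicy on a fixed
// 30s cadence rather than exponential backoff, because GC-policy mutations are
// serialized server-side. A self-contained sketch of that retry shape
// (retryFixed is a hypothetical stand-in for transport_tpg.Retry, shown only
// to illustrate the fixed-interval semantics):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryFixed re-invokes fn every pollInterval until it succeeds or the
// timeout elapses, returning the last error on timeout.
func retryFixed(fn func() error, timeout, pollInterval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().Add(pollInterval).After(deadline) {
			return fmt.Errorf("timed out after %s: %w", timeout, err)
		}
		time.Sleep(pollInterval)
	}
}

func main() {
	attempts := 0
	err := retryFixed(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("mutation still in progress")
		}
		return nil
	}, time.Minute, 10*time.Millisecond)
	fmt.Println(attempts, err) // 3 <nil>
}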
- timeout := d.Timeout(schema.TimeoutDelete) - pollInterval := time.Duration(30) * time.Second - err = transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: retryFunc, - Timeout: timeout, - PollInterval: pollInterval, - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigTableRetryableError}, - }) - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -func generateBigtableGCPolicy(d *schema.ResourceData) (bigtable.GCPolicy, error) { - var policies []bigtable.GCPolicy - mode := d.Get("mode").(string) - ma, aok := d.GetOk("max_age") - mv, vok := d.GetOk("max_version") - gcRules, gok := d.GetOk("gc_rules") - - if !aok && !vok && !gok { - return bigtable.NoGcPolicy(), nil - } - - if mode == "" && aok && vok { - return nil, fmt.Errorf("if multiple policies are set, mode can't be empty") - } - - if gok { - var topLevelPolicy map[string]interface{} - if err := json.Unmarshal([]byte(gcRules.(string)), &topLevelPolicy); err != nil { - return nil, err - } - return getGCPolicyFromJSON(topLevelPolicy /*isTopLevel=*/, true) - } - - if aok { - l, _ := ma.([]interface{}) - d, err := getMaxAgeDuration(l[0].(map[string]interface{})) - if err != nil { - return nil, err - } - - policies = append(policies, bigtable.MaxAgePolicy(d)) - } - - if vok { - l, _ := mv.([]interface{}) - n, _ := l[0].(map[string]interface{})["number"].(int) - - policies = append(policies, bigtable.MaxVersionsPolicy(n)) - } - - switch mode { - case GCPolicyModeUnion: - return bigtable.UnionPolicy(policies...), nil - case GCPolicyModeIntersection: - return bigtable.IntersectionPolicy(policies...), nil - } - - return policies[0], nil -} - -func getGCPolicyFromJSON(inputPolicy map[string]interface{}, isTopLevel bool) (bigtable.GCPolicy, error) { - policy := []bigtable.GCPolicy{} - - if err := validateNestedPolicy(inputPolicy, isTopLevel); err != nil { - return nil, err - } - - for _, p := range inputPolicy["rules"].([]interface{}) { - childPolicy := p.(map[string]interface{}) - if err := validateNestedPolicy(childPolicy /*isTopLevel=*/, false); err != nil { - return nil, err - } - - if childPolicy["max_age"] != nil { - maxAge := childPolicy["max_age"].(string) - duration, err := time.ParseDuration(maxAge) - if err != nil { - return nil, fmt.Errorf("invalid duration string: %v", maxAge) - } - policy = append(policy, bigtable.MaxAgePolicy(duration)) - } - - if childPolicy["max_version"] != nil { - version := childPolicy["max_version"].(float64) - policy = append(policy, bigtable.MaxVersionsPolicy(int(version))) - } - - if childPolicy["mode"] != nil { - n, err := getGCPolicyFromJSON(childPolicy /*isTopLevel=*/, false) - if err != nil { - return nil, err - } - policy = append(policy, n) - } - } - - switch inputPolicy["mode"] { - case strings.ToLower(GCPolicyModeUnion): - return bigtable.UnionPolicy(policy...), nil - case strings.ToLower(GCPolicyModeIntersection): - return bigtable.IntersectionPolicy(policy...), nil - default: - return policy[0], nil - } -} - -func validateNestedPolicy(p map[string]interface{}, isTopLevel bool) error { - if len(p) > 2 { - return fmt.Errorf("rules has more than 2 fields") - } - maxVersion, maxVersionOk := p["max_version"] - maxAge, maxAgeOk := p["max_age"] - rulesObj, rulesOk := p["rules"] - - _, modeOk := p["mode"] - rules, arrOk := rulesObj.([]interface{}) - _, vCastOk := maxVersion.(float64) - _, aCastOk := maxAge.(string) - - if rulesOk && !arrOk { - return fmt.Errorf("`rules` must be array") - } - - if modeOk && len(rules) < 2 { - return 
fmt.Errorf("`rules` need at least 2 GC rule when mode is specified") - } - - if isTopLevel && !rulesOk { - return fmt.Errorf("invalid nested policy, need `rules`") - } - - if isTopLevel && !modeOk && len(rules) != 1 { - return fmt.Errorf("when `mode` is not specified, `rules` can only have 1 child rule") - } - - if !isTopLevel && len(p) == 2 && (!modeOk || !rulesOk) { - return fmt.Errorf("need `mode` and `rules` for child nested policies") - } - - if !isTopLevel && len(p) == 1 && !maxVersionOk && !maxAgeOk { - return fmt.Errorf("need `max_version` or `max_age` for the rule") - } - - if maxVersionOk && !vCastOk { - return fmt.Errorf("`max_version` must be a number") - } - - if maxAgeOk && !aCastOk { - return fmt.Errorf("`max_age must be a string") - } - - return nil -} - -func getMaxAgeDuration(values map[string]interface{}) (time.Duration, error) { - d := values["duration"].(string) - if d != "" { - return time.ParseDuration(d) - } - - days := values["days"].(int) - - return time.Hour * 24 * time.Duration(days), nil + return tpgbigtable.GcPolicyToGCRuleString(gc, topLevel) } diff --git a/google/resource_bigtable_gc_policy_test.go b/google/resource_bigtable_gc_policy_test.go index fdaea1278e1..0ddc39634d4 100644 --- a/google/resource_bigtable_gc_policy_test.go +++ b/google/resource_bigtable_gc_policy_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + tpgbigtable "github.com/hashicorp/terraform-provider-google/google/services/bigtable" ) func TestAccBigtableGCPolicy_basic(t *testing.T) { @@ -203,187 +203,6 @@ func TestAccBigtableGCPolicy_gcRulesPolicy(t *testing.T) { }) } -func TestUnitBigtableGCPolicy_customizeDiff(t *testing.T) { - for _, tc := range testUnitBigtableGCPolicyCustomizeDiffTestcases { - tc.check(t) - } -} - -func (testcase *testUnitBigtableGCPolicyCustomizeDiffTestcase) check(t *testing.T) { - d := &tpgresource.ResourceDiffMock{ - Before: map[string]interface{}{}, - After: map[string]interface{}{}, - } - - d.Before["max_age.0.days"] = testcase.oldDays - d.Before["max_age.0.duration"] = testcase.oldDuration - - d.After["max_age.#"] = testcase.arraySize - d.After["max_age.0.days"] = testcase.newDays - d.After["max_age.0.duration"] = testcase.newDuration - - err := resourceBigtableGCPolicyCustomizeDiffFunc(d) - if err != nil { - t.Errorf("error on testcase %s - %v", testcase.testName, err) - } - - var cleared bool = d.Cleared != nil && d.Cleared["max_age.0.duration"] == true && d.Cleared["max_age.0.days"] == true - if cleared != testcase.cleared { - t.Errorf("%s: expected diff clear to be %v, but was %v", testcase.testName, testcase.cleared, cleared) - } -} - -type testUnitBigtableGCPolicyJSONRules struct { - name string - gcJSONString string - want string - errorExpected bool -} - -var testUnitBigtableGCPolicyRulesTestCases = []testUnitBigtableGCPolicyJSONRules{ - { - name: "Simple policy", - gcJSONString: `{"rules":[{"max_age":"10h"}]}`, - want: "age() > 10h", - errorExpected: false, - }, - { - name: "Simple multiple policies", - gcJSONString: `{"mode":"union", "rules":[{"max_age":"10h"},{"max_version":2}]}`, - want: "(age() > 10h || versions() > 2)", - errorExpected: false, - }, - { - name: "Nested policy", - gcJSONString: `{"mode":"union", "rules":[{"max_age":"10h"},{"mode": "intersection", "rules":[{"max_age":"2h"}, 
{"max_version":2}]}]}`, - want: "(age() > 10h || (age() > 2h && versions() > 2))", - errorExpected: false, - }, - { - name: "JSON with no `rules`", - gcJSONString: `{"mode": "union"}`, - errorExpected: true, - }, - { - name: "Empty JSON", - gcJSONString: "{}", - errorExpected: true, - }, - { - name: "Invalid duration string", - errorExpected: true, - gcJSONString: `{"mode":"union","rules":[{"max_age":"12o"},{"max_version":2}]}`, - }, - { - name: "Empty mode policy with more than 1 rules", - gcJSONString: `{"rules":[{"max_age":"10h"}, {"max_version":2}]}`, - errorExpected: true, - }, - { - name: "Less than 2 rules with mode specified", - gcJSONString: `{"mode":"union", "rules":[{"max_version":2}]}`, - errorExpected: true, - }, - { - name: "Invalid GC rule object", - gcJSONString: `{"mode": "union", "rules": [{"mode": "intersection"}]}`, - errorExpected: true, - }, - { - name: "Invalid GC rule field: not max_version or max_age", - gcJSONString: `{"mode": "union", "rules": [{"max_versions": 2}]}`, - errorExpected: true, - }, - { - name: "Invalid GC rule field: additional fields", - gcJSONString: `{"mode": "union", "rules": [{"max_age": "10h", "something_else": 100}]}`, - errorExpected: true, - }, - { - name: "Invalid GC rule field: more than 2 fields in a gc rule object", - gcJSONString: `{"mode": "union", "rules": [{"max_age": "10h", "max_version": 10, "something": 100}]}`, - errorExpected: true, - }, - { - name: "Invalid GC rule field: max_version or max_age is in the wrong type", - gcJSONString: `{"mode": "union", "rules": [{"max_age": "10d", "max_version": 2}]}`, - errorExpected: true, - }, - { - name: "Invalid GC rule: wrong data type for child gc_rule", - gcJSONString: `{"rules": {"max_version": "456"}}`, - errorExpected: true, - }, -} - -func TestUnitBigtableGCPolicy_getGCPolicyFromJSON(t *testing.T) { - for _, tc := range testUnitBigtableGCPolicyRulesTestCases { - t.Run(tc.name, func(t *testing.T) { - var topLevelPolicy map[string]interface{} - err := json.Unmarshal([]byte(tc.gcJSONString), &topLevelPolicy) - if err != nil { - t.Fatalf("error unmarshalling JSON string: %v", err) - } - got, err := getGCPolicyFromJSON(topLevelPolicy /*isTopLevel=*/, true) - if tc.errorExpected && err == nil { - t.Fatal("expect error, got nil") - } else if !tc.errorExpected && err != nil { - t.Fatalf("unexpected error: %v", err) - } else { - if got != nil && got.String() != tc.want { - t.Errorf("error getting policy from JSON, got: %v, want: %v", got, tc.want) - } - } - }) - } -} - -type testUnitBigtableGCPolicyCustomizeDiffTestcase struct { - testName string - arraySize int - oldDays int - newDays int - oldDuration string - newDuration string - cleared bool -} - -var testUnitBigtableGCPolicyCustomizeDiffTestcases = []testUnitBigtableGCPolicyCustomizeDiffTestcase{ - { - testName: "ArraySize0", - arraySize: 0, - cleared: false, - }, - { - testName: "DaysChange", - arraySize: 1, - oldDays: 3, - newDays: 2, - cleared: false, - }, - { - testName: "DurationChanges", - arraySize: 1, - oldDuration: "3h", - newDuration: "4h", - cleared: false, - }, - { - testName: "DaysToDurationEq", - arraySize: 1, - oldDays: 3, - newDuration: "72h", - cleared: true, - }, - { - testName: "DaysToDurationNotEq", - arraySize: 1, - oldDays: 3, - newDuration: "70h", - cleared: false, - }, -} - type testUnitGcPolicyToGCRuleString struct { name string policy bigtable.GCPolicy @@ -454,7 +273,7 @@ var testUnitGcPolicyToGCRuleStringTestCases = []testUnitGcPolicyToGCRuleString{ func TestUnitBigtableGCPolicy_gcPolicyToGCRuleString(t 
*testing.T) { for _, tc := range testUnitGcPolicyToGCRuleStringTestCases { t.Run(tc.name, func(t *testing.T) { - got, err := gcPolicyToGCRuleString(tc.policy, tc.topLevel) + got, err := tpgbigtable.GcPolicyToGCRuleString(tc.policy, tc.topLevel) if tc.errorExpected && err == nil { t.Fatal("expect error, got nil") } else if !tc.errorExpected && err != nil { @@ -540,7 +359,7 @@ func testAccBigtableGCPolicyExists(t *testing.T, n string, compareGcRules bool) if !compareGcRules { return nil } - gcRuleString, err := gcPolicyToGCRuleString(familyInfo.FullGCPolicy /*isTopLevel=*/, true) + gcRuleString, err := tpgbigtable.GcPolicyToGCRuleString(familyInfo.FullGCPolicy /*isTopLevel=*/, true) if err != nil { return fmt.Errorf("Error converting GC policy to JSON string: %s", err) } diff --git a/google/resource_bigtable_table.go b/google/resource_bigtable_table.go index 9212b162e52..acbb5dbd8a2 100644 --- a/google/resource_bigtable_table.go +++ b/google/resource_bigtable_table.go @@ -3,360 +3,9 @@ package google import ( - "context" - "fmt" - "log" - "time" - - "cloud.google.com/go/bigtable" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/bigtable" ) -func ResourceBigtableTable() *schema.Resource { - return &schema.Resource{ - Create: resourceBigtableTableCreate, - Read: resourceBigtableTableRead, - Update: resourceBigtableTableUpdate, - Delete: resourceBigtableTableDestroy, - - Importer: &schema.ResourceImporter{ - State: resourceBigtableTableImport, - }, - - // Set a longer timeout for table creation as adding column families can be slow. - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(45 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - }, - - // ---------------------------------------------------------------------- - // IMPORTANT: Do not add any additional ForceNew fields to this resource. - // Destroying/recreating tables can lead to data loss for users. - // ---------------------------------------------------------------------- - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the table. Must be 1-50 characters and must only contain hyphens, underscores, periods, letters and numbers.`, - }, - - "column_family": { - Type: schema.TypeSet, - Optional: true, - Description: `A group of columns within a table which share a common configuration. This can be specified multiple times.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "family": { - Type: schema.TypeString, - Required: true, - Description: `The name of the column family.`, - }, - }, - }, - }, - - "instance_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareResourceNames, - Description: `The name of the Bigtable instance.`, - }, - - "split_keys": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A list of predefined keys to split the table on. 
!> Warning: Modifying the split_keys of an existing table will cause Terraform to delete/recreate the entire google_bigtable_table resource.`, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "deletion_protection": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{"PROTECTED", "UNPROTECTED"}, false), - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A field to make the table protected against data loss i.e. when set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table would be prohibited. If not provided, currently deletion protection will be set to UNPROTECTED as it is the API default value.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - if err := d.Set("instance_name", instanceName); err != nil { - return fmt.Errorf("Error setting instance_name: %s", err) - } - - defer c.Close() - - tableId := d.Get("name").(string) - tblConf := bigtable.TableConf{TableID: tableId} - - // Check if deletion protection is given - // If not given, currently tblConf.DeletionProtection will be set to false in the API - deletionProtection := d.Get("deletion_protection") - if deletionProtection == "PROTECTED" { - tblConf.DeletionProtection = bigtable.Protected - } else if deletionProtection == "UNPROTECTED" { - tblConf.DeletionProtection = bigtable.Unprotected - } - - // Set the split keys if given. - if v, ok := d.GetOk("split_keys"); ok { - tblConf.SplitKeys = tpgresource.ConvertStringArr(v.([]interface{})) - } - - // Set the column families if given. - columnFamilies := make(map[string]bigtable.GCPolicy) - if d.Get("column_family.#").(int) > 0 { - columns := d.Get("column_family").(*schema.Set).List() - - for _, co := range columns { - column := co.(map[string]interface{}) - - if v, ok := column["family"]; ok { - // By default, there is no GC rules. - columnFamilies[v.(string)] = bigtable.NoGcPolicy() - } - } - } - tblConf.Families = columnFamilies - - // This method may return before the table's creation is complete - we may need to wait until - // it exists in the future. - // Set a longer timeout as creating table and adding column families can be pretty slow. - ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) - defer cancel() // Always call cancel. - err = c.CreateTableFromConf(ctxWithTimeout, &tblConf) - if err != nil { - return fmt.Errorf("Error creating table. 
%s", err) - } - - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceBigtableTableRead(d, meta) -} - -func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - table, err := c.TableInfo(ctx, name) - if err != nil { - if tpgresource.IsNotFoundGrpcError(err) { - log.Printf("[WARN] Removing %s because it's gone", name) - d.SetId("") - return nil - } - return err - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("column_family", flattenColumnFamily(table.Families)); err != nil { - return fmt.Errorf("Error setting column_family: %s", err) - } - - deletionProtection := table.DeletionProtection - if deletionProtection == bigtable.Protected { - if err := d.Set("deletion_protection", "PROTECTED"); err != nil { - return fmt.Errorf("Error setting deletion_protection: %s", err) - } - } else if deletionProtection == bigtable.Unprotected { - if err := d.Set("deletion_protection", "UNPROTECTED"); err != nil { - return fmt.Errorf("Error setting deletion_protection: %s", err) - } - } else { - return fmt.Errorf("Error setting deletion_protection, it should be either PROTECTED or UNPROTECTED") - } - return nil -} - -func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. 
%s", err) - } - defer c.Close() - - o, n := d.GetChange("column_family") - oSet := o.(*schema.Set) - nSet := n.(*schema.Set) - name := d.Get("name").(string) - - // Add column families that are in new but not in old - for _, new := range nSet.Difference(oSet).List() { - column := new.(map[string]interface{}) - - if v, ok := column["family"]; ok { - log.Printf("[DEBUG] adding column family %q", v) - if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { - return fmt.Errorf("Error creating column family %q: %s", v, err) - } - } - } - - // Remove column families that are in old but not in new - for _, old := range oSet.Difference(nSet).List() { - column := old.(map[string]interface{}) - - if v, ok := column["family"]; ok { - log.Printf("[DEBUG] removing column family %q", v) - if err := c.DeleteColumnFamily(ctx, name, v.(string)); err != nil { - return fmt.Errorf("Error deleting column family %q: %s", v, err) - } - } - } - - ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) - defer cancel() - if d.HasChange("deletion_protection") { - deletionProtection := d.Get("deletion_protection") - if deletionProtection == "PROTECTED" { - if err := c.UpdateTableWithDeletionProtection(ctxWithTimeout, name, bigtable.Protected); err != nil { - return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) - } - } else if deletionProtection == "UNPROTECTED" { - if err := c.UpdateTableWithDeletionProtection(ctxWithTimeout, name, bigtable.Unprotected); err != nil { - return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) - } - } - } - - return resourceBigtableTableRead(d, meta) -} - -func resourceBigtableTableDestroy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - ctx := context.Background() - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - err = c.DeleteTable(ctx, name) - if err != nil { - return fmt.Errorf("Error deleting table. 
%s", err) - } - - d.SetId("") - - return nil -} - func flattenColumnFamily(families []string) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(families)) - - for _, f := range families { - data := make(map[string]interface{}) - data["family"] = f - result = append(result, data) - } - - return result -} - -// TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 -func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil + return bigtable.FlattenColumnFamily(families) } diff --git a/google/resource_bigtable_table_test.go b/google/resource_bigtable_table_test.go index bb716a04ac0..72fac88f4f5 100644 --- a/google/resource_bigtable_table_test.go +++ b/google/resource_bigtable_table_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/bigtable" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -317,7 +318,7 @@ func testAccBigtableColumnFamilyExists(t *testing.T, table_name_space, family st if err != nil { return fmt.Errorf("Error retrieving table. Could not find %s in %s.", rs.Primary.Attributes["name"], rs.Primary.Attributes["instance_name"]) } - for _, data := range flattenColumnFamily(table.Families) { + for _, data := range bigtable.FlattenColumnFamily(table.Families) { if data["family"] != family { return fmt.Errorf("Error checking column family. 
Could not find column family %s in %s.", family, rs.Primary.Attributes["name"]) } diff --git a/google/resource_compute_attached_disk.go b/google/resource_compute_attached_disk.go index 8a906bbaf8f..cd80d4f0de0 100644 --- a/google/resource_compute_attached_disk.go +++ b/google/resource_compute_attached_disk.go @@ -3,257 +3,10 @@ package google import ( - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" "google.golang.org/api/compute/v1" ) -func ResourceComputeAttachedDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceAttachedDiskCreate, - Read: resourceAttachedDiskRead, - Delete: resourceAttachedDiskDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAttachedDiskImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(300 * time.Second), - Delete: schema.DefaultTimeout(300 * time.Second), - }, - - Schema: map[string]*schema.Schema{ - "disk": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `name or self_link of the disk that will be attached.`, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - }, - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `name or self_link of the compute instance that the disk will be attached to. If the self_link is provided then zone and project are extracted from the self link. If only the name is used then zone and project must be defined as properties on the resource or provider.`, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - }, - "project": { - Type: schema.TypeString, - ForceNew: true, - Computed: true, - Optional: true, - Description: `The project that the referenced compute instance is a part of. If instance is referenced by its self_link the project defined in the link will take precedence.`, - }, - "zone": { - Type: schema.TypeString, - ForceNew: true, - Computed: true, - Optional: true, - Description: `The zone that the referenced compute instance is located within. If instance is referenced by its self_link the zone defined in the link will take precedence.`, - }, - "device_name": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - Description: `Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine.`, - }, - "mode": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: "READ_WRITE", - Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. 
If not specified, the default is to attach the disk in READ_WRITE mode.`, - ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false), - }, - }, - UseJSONNumber: true, - } -} - -func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) - if err != nil { - return err - } - - disk := d.Get("disk").(string) - diskName := tpgresource.GetResourceNameFromSelfLink(disk) - diskSrc := fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName) - - // Check if the disk is a regional disk - if strings.Contains(disk, "regions") { - rv, err := tpgresource.ParseRegionDiskFieldValue(disk, d, config) - if err != nil { - return err - } - diskSrc = rv.RelativeLink() - } - - attachedDisk := compute.AttachedDisk{ - Source: diskSrc, - Mode: d.Get("mode").(string), - DeviceName: d.Get("device_name").(string), - } - - op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do() - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName)) - - waitErr := ComputeOperationWaitTime(config, op, zv.Project, - "disk to attach", userAgent, d.Timeout(schema.TimeoutCreate)) - if waitErr != nil { - d.SetId("") - return waitErr - } - - return resourceAttachedDiskRead(d, meta) -} - -func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) - if err != nil { - return err - } - if err := d.Set("project", zv.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", zv.Zone); err != nil { - return fmt.Errorf("Error setting zone: %s", err) - } - - diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) - - instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AttachedDisk %q", d.Id())) - } - - // Iterate through the instance's attached disks as this is the only way to - // confirm the disk is actually attached - ad := findDiskByName(instance.Disks, diskName) - if ad == nil { - log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.") - d.SetId("") - return nil - } - - if err := d.Set("device_name", ad.DeviceName); err != nil { - return fmt.Errorf("Error setting device_name: %s", err) - } - if err := d.Set("mode", ad.Mode); err != nil { - return fmt.Errorf("Error setting mode: %s", err) - } - - // Force the referenced resources to a self-link in state because it's more specific than name. 
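// GetRelativePath (tpgresource) reduces a full self_link URL to its
// project-relative form before it is stored in state. A hedged stand-in
// showing only the intended transformation (relativePath is illustrative,
// not the provider helper):

package main

import (
	"fmt"
	"strings"
)

// relativePath strips everything before the "projects/" segment, e.g.
// "https://.../compute/v1/projects/p/zones/z/instances/i" becomes
// "projects/p/zones/z/instances/i".
func relativePath(selfLink string) string {
	if i := strings.Index(selfLink, "/projects/"); i >= 0 {
		return selfLink[i+1:]
	}
	return selfLink
}

func main() {
	link := "https://www.googleapis.com/compute/v1/projects/my-proj/zones/us-central1-a/instances/my-vm"
	fmt.Println(relativePath(link)) // projects/my-proj/zones/us-central1-a/instances/my-vm
}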
- instancePath, err := tpgresource.GetRelativePath(instance.SelfLink) - if err != nil { - return err - } - if err := d.Set("instance", instancePath); err != nil { - return fmt.Errorf("Error setting instance: %s", err) - } - diskPath, err := tpgresource.GetRelativePath(ad.Source) - if err != nil { - return err - } - if err := d.Set("disk", diskPath); err != nil { - return fmt.Errorf("Error setting disk: %s", err) - } - - return nil -} - -func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) - if err != nil { - return err - } - - diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) - - instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() - if err != nil { - return err - } - - // Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an attached - // disk on the compute instance then return as though the delete call succeed since this is the desired state. - ad := findDiskByName(instance.Disks, diskName) - if ad == nil { - return nil - } - - op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do() - if err != nil { - return err - } - - waitErr := ComputeOperationWaitTime(config, op, zv.Project, - fmt.Sprintf("Detaching disk from %s", zv.Name), userAgent, d.Timeout(schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - return nil -} - -func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - - err := tpgresource.ParseImportId( - []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) - if err != nil { - return nil, err - } - - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") - if err != nil { - return nil, err - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - func findDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk { - for _, disk := range disks { - if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, id, nil) { - return disk - } - } - - return nil + return tpgcompute.FindDiskByName(disks, id) } diff --git a/google/resource_compute_attached_disk_test.go b/google/resource_compute_attached_disk_test.go index 7e7585331bc..3d408fb851f 100644 --- a/google/resource_compute_attached_disk_test.go +++ b/google/resource_compute_attached_disk_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/compute" ) func TestAccComputeAttachedDisk_basic(t *testing.T) { @@ -136,7 +137,7 @@ func testCheckAttachedDiskIsNowDetached(t *testing.T, instanceName, diskName str return err } - ad := findDiskByName(instance.Disks, diskName) + ad := compute.FindDiskByName(instance.Disks, diskName) if ad != nil { return fmt.Errorf("compute disk is still attached to compute instance") } diff --git 
a/google/resource_storage_bucket_object.go b/google/resource_storage_bucket_object.go index 4f8452c1c41..06d1ef738fd 100644 --- a/google/resource_storage_bucket_object.go +++ b/google/resource_storage_bucket_object.go @@ -15,10 +15,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "crypto/md5" "crypto/sha256" "encoding/base64" - "io/ioutil" "net/http" "google.golang.org/api/googleapi" @@ -135,11 +133,11 @@ func ResourceStorageBucketObject() *schema.Resource { DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { localMd5Hash := "" if source, ok := d.GetOkExists("source"); ok { - localMd5Hash = getFileMd5Hash(source.(string)) + localMd5Hash = tpgresource.GetFileMd5Hash(source.(string)) } if content, ok := d.GetOkExists("content"); ok { - localMd5Hash = getContentMd5Hash([]byte(content.(string))) + localMd5Hash = tpgresource.GetContentMd5Hash([]byte(content.(string))) } // If `source` or `content` is dynamically set, both field will be empty. @@ -497,20 +495,11 @@ func setEncryptionHeaders(customerEncryption map[string]string, headers http.Hea } func getFileMd5Hash(filename string) string { - data, err := ioutil.ReadFile(filename) - if err != nil { - log.Printf("[WARN] Failed to read source file %q. Cannot compute md5 hash for it.", filename) - return "" - } - return getContentMd5Hash(data) + return tpgresource.GetFileMd5Hash(filename) } func getContentMd5Hash(content []byte) string { - h := md5.New() - if _, err := h.Write(content); err != nil { - log.Printf("[WARN] Failed to compute md5 hash for content: %v", err) - } - return base64.StdEncoding.EncodeToString(h.Sum(nil)) + return tpgresource.GetContentMd5Hash(content) } func expandCustomerEncryption(input []interface{}) map[string]string { diff --git a/google/data_source_access_approval_folder_service_account.go b/google/services/accessapproval/data_source_access_approval_folder_service_account.go similarity index 98% rename from google/data_source_access_approval_folder_service_account.go rename to google/services/accessapproval/data_source_access_approval_folder_service_account.go index 6b4e44d69cc..33e3406d667 100644 --- a/google/data_source_access_approval_folder_service_account.go +++ b/google/services/accessapproval/data_source_access_approval_folder_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package accessapproval import ( "fmt" diff --git a/google/data_source_access_approval_organization_service_account.go b/google/services/accessapproval/data_source_access_approval_organization_service_account.go similarity index 98% rename from google/data_source_access_approval_organization_service_account.go rename to google/services/accessapproval/data_source_access_approval_organization_service_account.go index e00c02f07f4..7d6011a9d1a 100644 --- a/google/data_source_access_approval_organization_service_account.go +++ b/google/services/accessapproval/data_source_access_approval_organization_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
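// Backing up to the storage-object change above: its DiffSuppressFunc compares
// a locally computed, base64-encoded md5 (the encoding GCS reports in the
// object's md5Hash field) against state. A self-contained sketch mirroring the
// GetFileMd5Hash/GetContentMd5Hash helpers now housed in tpgresource:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"os"
)

// contentMd5 returns the base64-encoded md5 digest of content.
func contentMd5(content []byte) string {
	sum := md5.Sum(content)
	return base64.StdEncoding.EncodeToString(sum[:])
}

// fileMd5 hashes a local file; as in the provider, an unreadable source
// yields an empty hash rather than an error.
func fileMd5(filename string) string {
	data, err := os.ReadFile(filename)
	if err != nil {
		return ""
	}
	return contentMd5(data)
}

func main() {
	fmt.Println(contentMd5([]byte("hello"))) // XUFAKrxLKna5cZ2REBfFkg==
}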
// SPDX-License-Identifier: MPL-2.0 -package google +package accessapproval import ( "fmt" diff --git a/google/data_source_access_approval_project_service_account.go b/google/services/accessapproval/data_source_access_approval_project_service_account.go similarity index 98% rename from google/data_source_access_approval_project_service_account.go rename to google/services/accessapproval/data_source_access_approval_project_service_account.go index 962d8260789..761f3ccc756 100644 --- a/google/data_source_access_approval_project_service_account.go +++ b/google/services/accessapproval/data_source_access_approval_project_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package accessapproval import ( "fmt" diff --git a/google/data_source_alloydb_locations.go b/google/services/alloydb/data_source_alloydb_locations.go similarity index 99% rename from google/data_source_alloydb_locations.go rename to google/services/alloydb/data_source_alloydb_locations.go index 6fcbaca4b6e..8351f5bce24 100644 --- a/google/data_source_alloydb_locations.go +++ b/google/services/alloydb/data_source_alloydb_locations.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package alloydb import ( "fmt" diff --git a/google/data_source_alloydb_supported_database_flags.go b/google/services/alloydb/data_source_alloydb_supported_database_flags.go similarity index 99% rename from google/data_source_alloydb_supported_database_flags.go rename to google/services/alloydb/data_source_alloydb_supported_database_flags.go index 42614ed2fda..3687efd7c7a 100644 --- a/google/data_source_alloydb_supported_database_flags.go +++ b/google/services/alloydb/data_source_alloydb_supported_database_flags.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package alloydb import ( "fmt" diff --git a/google/resource_apigee_env_keystore_alias_pkcs12.go b/google/services/apigee/resource_apigee_env_keystore_alias_pkcs12.go similarity index 99% rename from google/resource_apigee_env_keystore_alias_pkcs12.go rename to google/services/apigee/resource_apigee_env_keystore_alias_pkcs12.go index 1f6a52a9ccd..e86b7661bc6 100644 --- a/google/resource_apigee_env_keystore_alias_pkcs12.go +++ b/google/services/apigee/resource_apigee_env_keystore_alias_pkcs12.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package apigee import ( "bytes" diff --git a/google/resource_apigee_flowhook.go b/google/services/apigee/resource_apigee_flowhook.go similarity index 99% rename from google/resource_apigee_flowhook.go rename to google/services/apigee/resource_apigee_flowhook.go index 0b3f2446dea..78dfaba8fe5 100644 --- a/google/resource_apigee_flowhook.go +++ b/google/services/apigee/resource_apigee_flowhook.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package apigee import ( "fmt" diff --git a/google/resource_apigee_keystores_aliases_key_cert_file.go b/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file.go similarity index 99% rename from google/resource_apigee_keystores_aliases_key_cert_file.go rename to google/services/apigee/resource_apigee_keystores_aliases_key_cert_file.go index d0d02cd1d37..7117593ee8f 100644 --- a/google/resource_apigee_keystores_aliases_key_cert_file.go +++ b/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package apigee import ( "bytes" diff --git a/google/resource_apigee_sharedflow.go b/google/services/apigee/resource_apigee_sharedflow.go similarity index 98% rename from google/resource_apigee_sharedflow.go rename to google/services/apigee/resource_apigee_sharedflow.go index f6bba016084..9022a0ea0f3 100644 --- a/google/resource_apigee_sharedflow.go +++ b/google/services/apigee/resource_apigee_sharedflow.go @@ -7,7 +7,7 @@ // // ---------------------------------------------------------------------------- -package google +package apigee import ( "context" @@ -124,7 +124,7 @@ func ResourceApigeeSharedFlow() *schema.Resource { DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { localMd5Hash := "" if config_bundle, ok := d.GetOkExists("config_bundle"); ok { - localMd5Hash = getFileMd5Hash(config_bundle.(string)) + localMd5Hash = tpgresource.GetFileMd5Hash(config_bundle.(string)) } if localMd5Hash == "" { return false @@ -167,7 +167,7 @@ func resourceApigeeSharedFlowCreate(d *schema.ResourceData, meta interface{}) er if err != nil { return err } - localMd5Hash = getFileMd5Hash(configBundlePath.(string)) + localMd5Hash = tpgresource.GetFileMd5Hash(configBundlePath.(string)) } else { return fmt.Errorf("Error, \"config_bundle\" must be specified") } @@ -466,7 +466,7 @@ func apigeeSharedflowDetectBundleUpdate(_ context.Context, diff *schema.Resource oldBundleHash := tmp.(string) currentBundleHash := "" if config_bundle, ok := diff.GetOkExists("config_bundle"); ok { - currentBundleHash = getFileMd5Hash(config_bundle.(string)) + currentBundleHash = tpgresource.GetFileMd5Hash(config_bundle.(string)) } log.Printf("[DEBUG] apigeeSharedflowDetectUpdate detect_md5hash: %s -> %s", oldBundleHash, currentBundleHash) diff --git a/google/resource_apigee_sharedflow_deployment.go b/google/services/apigee/resource_apigee_sharedflow_deployment.go similarity index 99% rename from google/resource_apigee_sharedflow_deployment.go rename to google/services/apigee/resource_apigee_sharedflow_deployment.go index 5a553cfa4b0..95f1c166876 100644 --- a/google/resource_apigee_sharedflow_deployment.go +++ b/google/services/apigee/resource_apigee_sharedflow_deployment.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package apigee import ( "fmt" diff --git a/google/data_source_google_app_engine_default_service_account.go b/google/services/appengine/data_source_google_app_engine_default_service_account.go similarity index 99% rename from google/data_source_google_app_engine_default_service_account.go rename to google/services/appengine/data_source_google_app_engine_default_service_account.go index 9546b03b407..33695e4d259 100644 --- a/google/data_source_google_app_engine_default_service_account.go +++ b/google/services/appengine/data_source_google_app_engine_default_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package appengine import ( "fmt" diff --git a/google/resource_app_engine_application.go b/google/services/appengine/resource_app_engine_application.go similarity index 99% rename from google/resource_app_engine_application.go rename to google/services/appengine/resource_app_engine_application.go index faf24412eea..7b69cd52d0b 100644 --- a/google/resource_app_engine_application.go +++ b/google/services/appengine/resource_app_engine_application.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package appengine import ( "context" diff --git a/google/data_source_google_bigquery_default_service_account.go b/google/services/bigquery/data_source_google_bigquery_default_service_account.go similarity index 99% rename from google/data_source_google_bigquery_default_service_account.go rename to google/services/bigquery/data_source_google_bigquery_default_service_account.go index da7ddf0b499..2e5eb0e6043 100644 --- a/google/data_source_google_bigquery_default_service_account.go +++ b/google/services/bigquery/data_source_google_bigquery_default_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package bigquery import ( "fmt" diff --git a/google/services/bigquery/iam_bigquery_dataset.go b/google/services/bigquery/iam_bigquery_dataset.go new file mode 100644 index 00000000000..b8bdaa28cd9 --- /dev/null +++ b/google/services/bigquery/iam_bigquery_dataset.go @@ -0,0 +1,289 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigquery + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamBigqueryDatasetSchema = map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +var bigqueryAccessPrimitiveToRoleMap = map[string]string{ + "OWNER": "roles/bigquery.dataOwner", + "WRITER": "roles/bigquery.dataEditor", + "READER": "roles/bigquery.dataViewer", +} + +type BigqueryDatasetIamUpdater struct { + project string + datasetId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewBigqueryDatasetIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return &BigqueryDatasetIamUpdater{ + project: project, + datasetId: d.Get("dataset_id").(string), + d: d, + Config: config, + }, nil +} + +func BigqueryDatasetIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseProjectFieldValue("datasets", d.Id(), "project", d, config, false) + if err != nil { + return err + } + + if err := d.Set("project", fv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("dataset_id", fv.Name); err != nil { + return fmt.Errorf("Error setting dataset_id: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
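+    // For example (illustrative, not part of this change): an import ID of
+    // "projects/my-proj/datasets/my_ds" parses to project "my-proj" and
+    // dataset_id "my_ds", and fv.RelativeLink() rebuilds
+    // "projects/my-proj/datasets/my_ds", matching GetResourceId() below.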
+ d.SetId(fv.RelativeLink()) + return nil +} + +func (u *BigqueryDatasetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: u.project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + policy, err := accessToPolicy(res["access"]) + if err != nil { + return nil, err + } + return policy, nil +} + +func (u *BigqueryDatasetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) + + access, err := policyToAccess(policy) + if err != nil { + return err + } + obj := map[string]interface{}{ + "access": access, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "PATCH", + Project: u.project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return fmt.Errorf("Error creating DatasetAccess: %s", err) + } + + return nil +} + +func accessToPolicy(access interface{}) (*cloudresourcemanager.Policy, error) { + if access == nil { + return nil, nil + } + roleToBinding := make(map[string]*cloudresourcemanager.Binding) + + accessArr := access.([]interface{}) + for _, v := range accessArr { + memberRole := v.(map[string]interface{}) + rawRole, ok := memberRole["role"] + if !ok { + // "view" allows role to not be defined. It is a special dataset access construct, so ignore + // If a user wants to manage "view" access they should use the `bigquery_dataset_access` resource + continue + } + role := rawRole.(string) + if iamRole, ok := bigqueryAccessPrimitiveToRoleMap[role]; ok { + // API changes certain IAM roles to legacy roles. Revert these changes + role = iamRole + } + member, err := accessToIamMember(memberRole) + if err != nil { + return nil, err + } + // We have to combine bindings manually + binding, ok := roleToBinding[role] + if !ok { + binding = &cloudresourcemanager.Binding{Role: role, Members: []string{}} + } + binding.Members = append(binding.Members, member) + + roleToBinding[role] = binding + } + bindings := make([]*cloudresourcemanager.Binding, 0) + for _, v := range roleToBinding { + bindings = append(bindings, v) + } + + return &cloudresourcemanager.Policy{Bindings: bindings}, nil +} + +func policyToAccess(policy *cloudresourcemanager.Policy) ([]map[string]interface{}, error) { + res := make([]map[string]interface{}, 0) + if len(policy.AuditConfigs) != 0 { + return nil, errors.New("Access policies not allowed on BigQuery Dataset IAM policies") + } + for _, binding := range policy.Bindings { + if binding.Condition != nil { + return nil, errors.New("IAM conditions not allowed on BigQuery Dataset IAM") + } + if fullRole, ok := bigqueryAccessPrimitiveToRoleMap[binding.Role]; ok { + return nil, fmt.Errorf("BigQuery Dataset legacy role %s is not allowed when using google_bigquery_dataset_iam resources. 
Please use the full form: %s", binding.Role, fullRole)
+        }
+        for _, member := range binding.Members {
+            // Do not append any deleted members
+            if strings.HasPrefix(member, "deleted:") {
+                continue
+            }
+            access := map[string]interface{}{
+                "role": binding.Role,
+            }
+            memberType, member, err := iamMemberToAccess(member)
+            if err != nil {
+                return nil, err
+            }
+            access[memberType] = member
+            res = append(res, access)
+        }
+    }
+
+    return res, nil
+}
+
+// Returns the member access type and member for an IAM member.
+// Dataset access uses different member types to identify groups, domains, etc.
+// These types are used as keys in the access JSON payload.
+func iamMemberToAccess(member string) (string, string, error) {
+    if strings.HasPrefix(member, "deleted:") {
+        return "", "", fmt.Errorf("BigQuery Dataset IAM member is deleted: %s", member)
+    }
+
+    pieces := strings.SplitN(member, ":", 2)
+    if len(pieces) > 1 {
+        switch pieces[0] {
+        case "group":
+            return "groupByEmail", pieces[1], nil
+        case "domain":
+            return "domain", pieces[1], nil
+        case "user":
+            return "userByEmail", pieces[1], nil
+        case "serviceAccount":
+            return "userByEmail", pieces[1], nil
+        default:
+            return "", "", fmt.Errorf("Failed to parse BigQuery Dataset IAM member type: %s", member)
+        }
+    }
+    if member == "projectOwners" || member == "projectReaders" || member == "projectWriters" || member == "allAuthenticatedUsers" {
+        // These are special BigQuery Dataset permissions
+        return "specialGroup", member, nil
+    }
+    return "iamMember", member, nil
+}
+
+func accessToIamMember(access map[string]interface{}) (string, error) {
+    // One of the fields must be set, we have to find which IAM member type this maps to
+    if member, ok := access["groupByEmail"]; ok {
+        return fmt.Sprintf("group:%s", member.(string)), nil
+    }
+    if member, ok := access["domain"]; ok {
+        return fmt.Sprintf("domain:%s", member.(string)), nil
+    }
+    if member, ok := access["specialGroup"]; ok {
+        return member.(string), nil
+    }
+    if member, ok := access["iamMember"]; ok {
+        return member.(string), nil
+    }
+    if _, ok := access["view"]; ok {
+        // view does not map to an IAM member, use access instead
+        return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use views with a dataset, please use dataset_access")
+    }
+    if _, ok := access["dataset"]; ok {
+        // dataset does not map to an IAM member, use access instead
+        return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use datasets with a dataset, please use dataset_access")
+    }
+    if _, ok := access["routine"]; ok {
+        // routine does not map to an IAM member, use access instead
+        return "", fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. To use routines with a dataset, please use dataset_access")
+    }
+    if member, ok := access["userByEmail"]; ok {
+        // service accounts have "gservice" in their email.
This is best guess due to lost information + if strings.Contains(member.(string), "gserviceaccount") { + return fmt.Sprintf("serviceAccount:%s", member.(string)), nil + } + return fmt.Sprintf("user:%s", member.(string)), nil + } + return "", fmt.Errorf("Failed to identify IAM member from BigQuery Dataset access: %v", access) +} + +func (u *BigqueryDatasetIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/datasets/%s", u.project, u.datasetId) +} + +// Matches the mutex of google_big_query_dataset_access +func (u *BigqueryDatasetIamUpdater) GetMutexKey() string { + return fmt.Sprintf("%s", u.datasetId) +} + +func (u *BigqueryDatasetIamUpdater) DescribeResource() string { + return fmt.Sprintf("Bigquery Dataset %s/%s", u.project, u.datasetId) +} diff --git a/google/resource_bigquery_table.go b/google/services/bigquery/resource_bigquery_table.go similarity index 99% rename from google/resource_bigquery_table.go rename to google/services/bigquery/resource_bigquery_table.go index fd04995ecfe..1f435bcb699 100644 --- a/google/resource_bigquery_table.go +++ b/google/services/bigquery/resource_bigquery_table.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package bigquery import ( "context" diff --git a/google/services/bigquery/resource_bigquery_table_internal_test.go b/google/services/bigquery/resource_bigquery_table_internal_test.go new file mode 100644 index 00000000000..0cb19078781 --- /dev/null +++ b/google/services/bigquery/resource_bigquery_table_internal_test.go @@ -0,0 +1,565 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigquery + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func TestBigQueryTableSchemaDiffSuppress(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "empty schema": { + Old: "null", + New: "[]", + ExpectDiffSuppress: true, + }, + "empty schema -> non-empty": { + Old: "null", + New: `[ + { + "name": "PageNo", + "type": "INTEGER" + } + ]`, + ExpectDiffSuppress: false, + }, + "no change": { + Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"finalKey\" : {} }]", + New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"finalKey\" : {} }]", + ExpectDiffSuppress: true, + }, + "remove key": { + Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"finalKey\" : {} }]", + New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"finalKey\" : {} }]", + ExpectDiffSuppress: false, + }, + "empty description -> default description (empty)": { + Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"description\": \"\" }]", + New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\" }]", + ExpectDiffSuppress: true, + }, + "empty description -> other description": { + Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"description\": \"\" }]", + New: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"description\": \"somethingRandom\" }]", + ExpectDiffSuppress: false, + }, + "mode NULLABLE -> other mode": { + Old: "[{\"name\": \"someValue\", \"type\": \"INT64\", \"anotherKey\" : \"anotherValue\", \"mode\": \"NULLABLE\" }]", + New: "[{\"name\": \"someValue\", \"type\": \"INT64\", 
\"anotherKey\" : \"anotherValue\", \"mode\": \"somethingRandom\" }]", + ExpectDiffSuppress: false, + }, + "mode NULLABLE -> default mode (also NULLABLE)": { + Old: `[ + { + "mode": "NULLABLE", + "name": "PageNo", + "type": "INTEGER" + } + ]`, + New: `[ + { + "name": "PageNo", + "type": "INTEGER" + } + ]`, + ExpectDiffSuppress: true, + }, + "mode & type uppercase -> lowercase": { + Old: `[ + { + "mode": "NULLABLE", + "name": "PageNo", + "type": "INTEGER" + } + ]`, + New: `[ + { + "mode": "nullable", + "name": "PageNo", + "type": "integer" + } + ]`, + ExpectDiffSuppress: true, + }, + "type INTEGER -> INT64": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"INTEGER\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"INT64\" }]", + ExpectDiffSuppress: true, + }, + "type INTEGER -> other": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"INTEGER\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"somethingRandom\" }]", + ExpectDiffSuppress: false, + }, + "type FLOAT -> FLOAT64": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"FLOAT\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"FLOAT64\" }]", + ExpectDiffSuppress: true, + }, + "type FLOAT -> other": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"FLOAT\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"somethingRandom\" }]", + ExpectDiffSuppress: false, + }, + "type BOOLEAN -> BOOL": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOL\" }]", + ExpectDiffSuppress: true, + }, + "type BOOLEAN -> other": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"somethingRandom\" }]", + ExpectDiffSuppress: false, + }, + // this is invalid but we need to make sure we don't cause a panic + // if users provide an invalid schema + "invalid - missing type for old": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", + ExpectDiffSuppress: false, + }, + // this is invalid but we need to make sure we don't cause a panic + // if users provide an invalid schema + "invalid - missing type for new": { + Old: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", + New: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", + ExpectDiffSuppress: false, + }, + "reordering fields": { + Old: `[ + { + "name": "PageNo", + "type": "INTEGER" + }, + { + "name": "IngestTime", + "type": "TIMESTAMP" + } + ]`, + New: `[ + { + "name": "IngestTime", + "type": "TIMESTAMP" + }, + { + "name": "PageNo", + "type": "INTEGER" + } + ]`, + ExpectDiffSuppress: true, + }, + "reordering fields with value change": { + Old: `[ + { + "name": "PageNo", + "type": "INTEGER", + "description": "someVal" + }, + { + "name": "IngestTime", + "type": "TIMESTAMP" + } + ]`, + New: `[ + { + "name": "IngestTime", + "type": "TIMESTAMP" + }, + { + "name": "PageNo", + "type": "INTEGER", + "description": "otherVal" + } + ]`, + ExpectDiffSuppress: false, + }, + "nested field ordering changes": { + Old: 
`[ + { + "name": "someValue", + "type": "INTEGER", + "fields": [ + { + "name": "value1", + "type": "INTEGER", + "mode": "NULLABLE", + "description": "someVal" + }, + { + "name": "value2", + "type": "BOOLEAN", + "mode": "NULLABLE", + "description": "someVal" + } + ] + } + ]`, + New: `[ + { + "name": "someValue", + "type": "INTEGER", + "fields": [ + { + "name": "value2", + "type": "BOOLEAN", + "mode": "NULLABLE", + "description": "someVal" + }, + { + "name": "value1", + "type": "INTEGER", + "mode": "NULLABLE", + "description": "someVal" + } + ] + } + ]`, + ExpectDiffSuppress: true, + }, + "policyTags": { + Old: `[ + { + "mode": "NULLABLE", + "name": "providerphone", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + }, + "type":"STRING" + } + ]`, + New: `[ + { + "name": "providerphone", + "type": "STRING", + "policyTags": { + "names": ["projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678"] + } + } + ]`, + ExpectDiffSuppress: true, + }, + "multiple levels of reordering with policyTags set": { + Old: `[ + { + "mode": "NULLABLE", + "name": "providerphone", + "type":"STRING", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + }, + "fields": [ + { + "name": "value1", + "type": "INTEGER", + "mode": "NULLABLE", + "description": "someVal", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + } + }, + { + "name": "value2", + "type": "BOOLEAN", + "mode": "NULLABLE", + "description": "someVal" + } + ] + }, + { + "name": "PageNo", + "type": "INTEGER" + }, + { + "name": "IngestTime", + "type": "TIMESTAMP", + "fields": [ + { + "name": "value3", + "type": "INTEGER", + "mode": "NULLABLE", + "description": "someVal", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + } + }, + { + "name": "value4", + "type": "BOOLEAN", + "mode": "NULLABLE", + "description": "someVal" + } + ] + } + ]`, + New: `[ + { + "name": "IngestTime", + "type": "TIMESTAMP", + "fields": [ + { + "name": "value4", + "type": "BOOLEAN", + "mode": "NULLABLE", + "description": "someVal" + }, + { + "name": "value3", + "type": "INTEGER", + "mode": "NULLABLE", + "description": "someVal", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + } + } + ] + }, + { + "mode": "NULLABLE", + "name": "providerphone", + "type":"STRING", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + }, + "fields": [ + { + "name": "value1", + "type": "INTEGER", + "mode": "NULLABLE", + "description": "someVal", + "policyTags": { + "names": [ + "projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678" + ] + } + }, + { + "name": "value2", + "type": "BOOLEAN", + "mode": "NULLABLE", + "description": "someVal" + } + ] + }, + { + "name": "PageNo", + "type": "INTEGER" + } + ]`, + ExpectDiffSuppress: true, + }, + } + + for tn, tc := range cases { + tn := tn + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + + var a, b interface{} + if err := json.Unmarshal([]byte(tc.Old), &a); err != nil { + t.Fatalf(fmt.Sprintf("unable to unmarshal old json - %v", err)) + } + if err := json.Unmarshal([]byte(tc.New), &b); err != nil { + t.Fatalf(fmt.Sprintf("unable to unmarshal new json - %v", err)) + } + if bigQueryTableSchemaDiffSuppress("schema", tc.Old, tc.New, nil) != 
tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + }) + } +} + +type testUnitBigQueryDataTableJSONChangeableTestCase struct { + name string + jsonOld string + jsonNew string + changeable bool +} + +func (testcase *testUnitBigQueryDataTableJSONChangeableTestCase) check(t *testing.T) { + var old, new interface{} + if err := json.Unmarshal([]byte(testcase.jsonOld), &old); err != nil { + t.Fatalf("unable to unmarshal json - %v", err) + } + if err := json.Unmarshal([]byte(testcase.jsonNew), &new); err != nil { + t.Fatalf("unable to unmarshal json - %v", err) + } + changeable, err := resourceBigQueryTableSchemaIsChangeable(old, new) + if err != nil { + t.Errorf("%s failed unexpectedly: %s", testcase.name, err) + } + if changeable != testcase.changeable { + t.Errorf("expected changeable result of %v but got %v for testcase %s", testcase.changeable, changeable, testcase.name) + } + + d := &tpgresource.ResourceDiffMock{ + Before: map[string]interface{}{}, + After: map[string]interface{}{}, + } + + d.Before["schema"] = testcase.jsonOld + d.After["schema"] = testcase.jsonNew + + err = resourceBigQueryTableSchemaCustomizeDiffFunc(d) + if err != nil { + t.Errorf("error on testcase %s - %v", testcase.name, err) + } + if !testcase.changeable != d.IsForceNew { + t.Errorf("%s: expected d.IsForceNew to be %v, but was %v", testcase.name, !testcase.changeable, d.IsForceNew) + } +} + +var testUnitBigQueryDataTableIsChangableTestCases = []testUnitBigQueryDataTableJSONChangeableTestCase{ + { + name: "defaultEquality", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + changeable: true, + }, + { + name: "arraySizeIncreases", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"asomeValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + changeable: true, + }, + { + name: "arraySizeDecreases", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"asomeValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + changeable: false, + }, + { + name: "descriptionChanges", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", + changeable: true, + }, + { + name: "typeInteger", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"INT64\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", + changeable: true, + }, + { + name: "typeFloat", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"FLOAT\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"FLOAT64\", \"mode\" : 
\"NULLABLE\", \"description\" : \"some new value\" }]", + changeable: true, + }, + { + name: "typeBool", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOL\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", + changeable: true, + }, + { + name: "typeChangeIncompatible", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"DATETIME\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", + changeable: false, + }, + // this is invalid but we need to make sure we don't cause a panic + // if users provide an invalid schema + { + name: "typeChangeIgnoreNewMissingType", + jsonOld: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", + jsonNew: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", + changeable: true, + }, + // this is invalid but we need to make sure we don't cause a panic + // if users provide an invalid schema + { + name: "typeChangeIgnoreOldMissingType", + jsonOld: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\" }]", + jsonNew: "[{\"name\": \"someValue\", \"anotherKey\" : \"anotherValue\", \"type\": \"BOOLEAN\" }]", + changeable: true, + }, + { + name: "typeModeReqToNull", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"some new value\" }]", + changeable: true, + }, + { + name: "typeModeIncompatible", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REPEATED\", \"description\" : \"some new value\" }]", + changeable: false, + }, + { + name: "modeToDefaultNullable", + jsonOld: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"mode\" : \"REQUIRED\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"someValue\", \"type\" : \"BOOLEAN\", \"description\" : \"some new value\" }]", + changeable: true, + }, + { + name: "orderOfArrayChangesAndDescriptionChanges", + jsonOld: "[{\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"value2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"value2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"newVal\" }, {\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + changeable: true, + }, + { + name: "orderOfArrayChangesAndNameChanges", + jsonOld: "[{\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }, {\"name\": \"value2\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + jsonNew: "[{\"name\": \"value3\", \"type\" : \"BOOLEAN\", \"mode\" : \"NULLABLE\", \"description\" : \"newVal\" }, {\"name\": \"value1\", \"type\" : \"INTEGER\", \"mode\" : \"NULLABLE\", \"description\" : \"someVal\" }]", + changeable: false, + }, + { + name: "policyTags", + jsonOld: `[ + { + "mode": "NULLABLE", + "name": "providerphone", + "policyTags": { + "names": 
["projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678"] + }, + "type":"STRING" + } + ]`, + jsonNew: `[ + { + "name": "providerphone", + "type": "STRING", + "policyTags": { + "names": ["projects/my-project/locations/us/taxonomies/12345678/policyTags/12345678"] + } + } + ]`, + changeable: true, + }, +} + +func TestUnitBigQueryDataTable_schemaIsChangable(t *testing.T) { + t.Parallel() + for _, testcase := range testUnitBigQueryDataTableIsChangableTestCases { + testcase.check(t) + testcaseNested := &testUnitBigQueryDataTableJSONChangeableTestCase{ + testcase.name + "Nested", + fmt.Sprintf("[{\"name\": \"someValue\", \"type\" : \"INTEGER\", \"fields\" : %s }]", testcase.jsonOld), + fmt.Sprintf("[{\"name\": \"someValue\", \"type\" : \"INT64\", \"fields\" : %s }]", testcase.jsonNew), + testcase.changeable, + } + testcaseNested.check(t) + } +} diff --git a/google/services/bigtable/iam_bigtable_instance.go b/google/services/bigtable/iam_bigtable_instance.go new file mode 100644 index 00000000000..4d62d152069 --- /dev/null +++ b/google/services/bigtable/iam_bigtable_instance.go @@ -0,0 +1,145 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigtable + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/bigtableadmin/v2" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamBigtableInstanceSchema = map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type BigtableInstanceIamUpdater struct { + project string + instance string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewBigtableInstanceUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return &BigtableInstanceIamUpdater{ + project: project, + instance: d.Get("instance").(string), + d: d, + Config: config, + }, nil +} + +func BigtableInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseProjectFieldValue("instances", d.Id(), "project", d, config, false) + if err != nil { + return err + } + + if err := d.Set("project", fv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("instance", fv.Name); err != nil { + return fmt.Errorf("Error setting instance: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
+ d.SetId(fv.RelativeLink()) + return nil +} + +func (u *BigtableInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + req := &bigtableadmin.GetIamPolicyRequest{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewBigTableProjectsInstancesClient(userAgent).GetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := bigtableToResourceManagerPolicy(p) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *BigtableInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + bigtablePolicy, err := resourceManagerToBigtablePolicy(policy) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + req := &bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewBigTableProjectsInstancesClient(userAgent).SetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigtableInstanceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/instances/%s", u.project, u.instance) +} + +func (u *BigtableInstanceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigtable-instance-%s-%s", u.project, u.instance) +} + +func (u *BigtableInstanceIamUpdater) DescribeResource() string { + return fmt.Sprintf("Bigtable Instance %s/%s", u.project, u.instance) +} + +func resourceManagerToBigtablePolicy(p *cloudresourcemanager.Policy) (*bigtableadmin.Policy, error) { + out := &bigtableadmin.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a bigtable policy to a cloudresourcemanager policy: {{err}}", err) + } + return out, nil +} + +func bigtableToResourceManagerPolicy(p *bigtableadmin.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a bigtable policy: {{err}}", err) + } + return out, nil +} diff --git a/google/services/bigtable/iam_bigtable_table.go b/google/services/bigtable/iam_bigtable_table.go new file mode 100644 index 00000000000..73c7e5d0ab2 --- /dev/null +++ b/google/services/bigtable/iam_bigtable_table.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+package bigtable
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/terraform-provider-google/google/tpgiamresource"
+    "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+    transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
+    "google.golang.org/api/bigtableadmin/v2"
+
+    "github.com/hashicorp/errwrap"
+    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+    "google.golang.org/api/cloudresourcemanager/v1"
+)
+
+var IamBigtableTableSchema = map[string]*schema.Schema{
+    "instance": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+    },
+    "project": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        ForceNew: true,
+    },
+    "table": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+    },
+}
+
+type BigtableTableIamUpdater struct {
+    project  string
+    instance string
+    table    string
+    d        tpgresource.TerraformResourceData
+    Config   *transport_tpg.Config
+}
+
+func NewBigtableTableUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) {
+    project, err := tpgresource.GetProject(d, config)
+    if err != nil {
+        return nil, err
+    }
+
+    if err := d.Set("project", project); err != nil {
+        return nil, fmt.Errorf("Error setting project: %s", err)
+    }
+
+    return &BigtableTableIamUpdater{
+        project:  project,
+        instance: d.Get("instance").(string),
+        table:    d.Get("table").(string),
+        d:        d,
+        Config:   config,
+    }, nil
+}
+
+func BigtableTableIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error {
+    values := make(map[string]string)
+
+    m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/tables/(?P<table>
[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + project, _ := tpgresource.GetProject(d, config) + + for k, v := range m { + values[k] = v + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + if err := d.Set("instance", values["instance"]); err != nil { + return fmt.Errorf("Error setting instance: %s", err) + } + + if err := d.Set("table", values["table"]); err != nil { + return fmt.Errorf("Error setting table: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. + d.SetId(fmt.Sprintf("projects/%s/instances/%s/tables/%s", project, values["instance"], values["table"])) + return nil +} + +func (u *BigtableTableIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + req := &bigtableadmin.GetIamPolicyRequest{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewBigTableProjectsInstancesTablesClient(userAgent).GetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := bigtableToResourceManagerPolicy(p) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *BigtableTableIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + bigtablePolicy, err := resourceManagerToBigtablePolicy(policy) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + req := &bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewBigTableProjectsInstancesTablesClient(userAgent).SetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigtableTableIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/instances/%s/tables/%s", u.project, u.instance, u.table) +} + +func (u *BigtableTableIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigtable-instance-%s-%s-%s", u.project, u.instance, u.table) +} + +func (u *BigtableTableIamUpdater) DescribeResource() string { + return fmt.Sprintf("Bigtable Table %s/%s-%s", u.project, u.instance, u.table) +} diff --git a/google/services/bigtable/resource_bigtable_gc_policy.go b/google/services/bigtable/resource_bigtable_gc_policy.go new file mode 100644 index 00000000000..d54fd84f838 --- /dev/null +++ b/google/services/bigtable/resource_bigtable_gc_policy.go @@ -0,0 +1,597 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigtable + +import ( + "context" + "encoding/json" + "fmt" + "log" + "strings" + "time" + + "cloud.google.com/go/bigtable" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +const ( + GCPolicyModeIntersection = "INTERSECTION" + GCPolicyModeUnion = "UNION" +) + +func resourceBigtableGCPolicyCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { + count := diff.Get("max_age.#").(int) + if count < 1 { + return nil + } + + oldDays, newDays := diff.GetChange("max_age.0.days") + oldDuration, newDuration := diff.GetChange("max_age.0.duration") + log.Printf("days: %v %v", oldDays, newDays) + log.Printf("duration: %v %v", oldDuration, newDuration) + + if oldDuration == "" && newDuration != "" { + // flatten the old days and the new duration to duration... if they are + // equal then do nothing. + do, err := time.ParseDuration(newDuration.(string)) + if err != nil { + return err + } + dn := time.Hour * 24 * time.Duration(oldDays.(int)) + if do == dn { + err := diff.Clear("max_age.0.days") + if err != nil { + return err + } + err = diff.Clear("max_age.0.duration") + if err != nil { + return err + } + } + } + + return nil +} + +func resourceBigtableGCPolicyCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + return resourceBigtableGCPolicyCustomizeDiffFunc(d) +} + +func ResourceBigtableGCPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceBigtableGCPolicyUpsert, + Read: resourceBigtableGCPolicyRead, + Delete: resourceBigtableGCPolicyDestroy, + Update: resourceBigtableGCPolicyUpsert, + CustomizeDiff: resourceBigtableGCPolicyCustomizeDiff, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The name of the Bigtable instance.`, + }, + + "table": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the table.`, + }, + + "column_family": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the column family.`, + }, + + "gc_rules": { + Type: schema.TypeString, + Optional: true, + Description: `Serialized JSON string for garbage collection policy. Conflicts with "mode", "max_age" and "max_version".`, + ValidateFunc: validation.StringIsJSON, + ConflictsWith: []string{"mode", "max_age", "max_version"}, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + }, + "mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `NOTE: 'gc_rules' is more flexible, and should be preferred over this field for new resources. This field may be deprecated in the future. 
If multiple policies are set, you should choose between UNION OR INTERSECTION.`, + ValidateFunc: validation.StringInSlice([]string{GCPolicyModeIntersection, GCPolicyModeUnion}, false), + ConflictsWith: []string{"gc_rules"}, + }, + + "max_age": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `NOTE: 'gc_rules' is more flexible, and should be preferred over this field for new resources. This field may be deprecated in the future. GC policy that applies to all cells older than the given age.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Deprecated: "Deprecated in favor of duration", + Description: `Number of days before applying GC policy.`, + ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, + }, + "duration": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Duration before applying GC policy`, + ValidateFunc: verify.ValidateDuration(), + ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, + }, + }, + }, + ConflictsWith: []string{"gc_rules"}, + }, + + "max_version": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `NOTE: 'gc_rules' is more flexible, and should be preferred over this field for new resources. This field may be deprecated in the future. GC policy that applies to all versions of a cell except for the most recent.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "number": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Number of version before applying the GC policy.`, + }, + }, + }, + ConflictsWith: []string{"gc_rules"}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "deletion_policy": { + Type: schema.TypeString, + Optional: true, + Description: `The deletion policy for the GC policy. Setting ABANDON allows the resource + to be abandoned rather than deleted. This is useful for GC policy as it cannot be deleted + in a replicated instance. Possible values are: "ABANDON".`, + ValidateFunc: validation.StringInSlice([]string{"ABANDON", ""}, false), + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + if err := d.Set("instance_name", instanceName); err != nil { + return fmt.Errorf("Error setting instance_name: %s", err) + } + + defer c.Close() + + gcPolicy, err := generateBigtableGCPolicy(d) + if err != nil { + return err + } + + tableName := d.Get("table").(string) + columnFamily := d.Get("column_family").(string) + + retryFunc := func() error { + reqErr := c.SetGCPolicy(ctx, tableName, columnFamily, gcPolicy) + return reqErr + } + // The default create timeout is 20 minutes. 
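+    // d.Timeout returns the user-configured value when a timeouts block is
+    // set, e.g. timeouts { create = "45m" } (illustrative), and falls back
+    // to the default declared on the resource otherwise.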
+ timeout := d.Timeout(schema.TimeoutCreate) + pollInterval := time.Duration(30) * time.Second + // Mutations to gc policies can only happen one-at-a-time and take some amount of time. + // Use a fixed polling rate of 30s based on the RetryInfo returned by the server rather than + // the standard up-to-10s exponential backoff for those operations. + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: retryFunc, + Timeout: timeout, + PollInterval: pollInterval, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigTableRetryableError}, + }) + if err != nil { + return err + } + + table, err := c.TableInfo(ctx, tableName) + if err != nil { + return fmt.Errorf("Error retrieving table. Could not find %s in %s. %s", tableName, instanceName, err) + } + + for _, i := range table.FamilyInfos { + if i.Name == columnFamily { + d.SetId(i.GCPolicy) + } + } + + return resourceBigtableGCPolicyRead(d, meta) +} + +func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Get("table").(string) + columnFamily := d.Get("column_family").(string) + ti, err := c.TableInfo(ctx, name) + if err != nil { + if tpgresource.IsNotFoundGrpcError(err) { + log.Printf("[WARN] Removing the GC policy because the parent table %s is gone", name) + d.SetId("") + return nil + } + return err + } + + for _, fi := range ti.FamilyInfos { + if fi.Name != columnFamily { + continue + } + + d.SetId(fi.GCPolicy) + + // No GC Policy. + if fi.FullGCPolicy.String() == "" { + return nil + } + + // Only set `gc_rules`` when the legacy fields are not set. We are not planning to support legacy fields. + maxAge := d.Get("max_age") + maxVersion := d.Get("max_version") + if d.Get("mode") == "" && len(maxAge.([]interface{})) == 0 && len(maxVersion.([]interface{})) == 0 { + gcRuleString, err := GcPolicyToGCRuleString(fi.FullGCPolicy, true) + if err != nil { + return err + } + gcRuleJsonString, err := json.Marshal(gcRuleString) + if err != nil { + return fmt.Errorf("Error marshaling GC policy to json: %s", err) + } + d.Set("gc_rules", string(gcRuleJsonString)) + } + break + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return nil +} + +// Recursively convert Bigtable GC policy to JSON format in a map. +func GcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]interface{}, error) { + result := make(map[string]interface{}) + switch bigtable.GetPolicyType(gc) { + case bigtable.PolicyMaxAge: + age := gc.(bigtable.MaxAgeGCPolicy).GetDurationString() + if topLevel { + rule := make(map[string]interface{}) + rule["max_age"] = age + rules := []interface{}{} + rules = append(rules, rule) + result["rules"] = rules + } else { + result["max_age"] = age + } + break + case bigtable.PolicyMaxVersion: + // bigtable.MaxVersionsGCPolicy is an int. + // Not sure why max_version is a float64. + // TODO: Maybe change max_version to an int. 
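+        // A likely explanation for the float64: encoding/json decodes every
+        // JSON number into float64 when the target is map[string]interface{},
+        // so getGCPolicyFromJSON reads max_version as float64; emitting
+        // float64 here keeps both directions consistent.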
+ version := float64(int(gc.(bigtable.MaxVersionsGCPolicy))) + if topLevel { + rule := make(map[string]interface{}) + rule["max_version"] = version + rules := []interface{}{} + rules = append(rules, rule) + result["rules"] = rules + } else { + result["max_version"] = version + } + break + case bigtable.PolicyUnion: + result["mode"] = "union" + rules := []interface{}{} + for _, c := range gc.(bigtable.UnionGCPolicy).Children { + gcRuleString, err := GcPolicyToGCRuleString(c, false) + if err != nil { + return nil, err + } + rules = append(rules, gcRuleString) + } + result["rules"] = rules + break + case bigtable.PolicyIntersection: + result["mode"] = "intersection" + rules := []interface{}{} + for _, c := range gc.(bigtable.IntersectionGCPolicy).Children { + gcRuleString, err := GcPolicyToGCRuleString(c, false) + if err != nil { + return nil, err + } + rules = append(rules, gcRuleString) + } + result["rules"] = rules + default: + break + } + + if err := validateNestedPolicy(result, topLevel); err != nil { + return nil, err + } + + return result, nil +} + +func resourceBigtableGCPolicyDestroy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { + // Allows for the GC policy to be abandoned without deletion to avoid possible + // deletion failure in a replicated instance. + log.Printf("[WARN] The GC policy is abandoned") + return nil + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + retryFunc := func() error { + reqErr := c.SetGCPolicy(ctx, d.Get("table").(string), d.Get("column_family").(string), bigtable.NoGcPolicy()) + return reqErr + } + // The default delete timeout is 20 minutes. 
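+    // As with create, this is the user-configured timeouts.delete value if
+    // one was set (illustrative example: timeouts { delete = "30m" }),
+    // otherwise the 20-minute default declared on the resource.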
+ timeout := d.Timeout(schema.TimeoutDelete) + pollInterval := time.Duration(30) * time.Second + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: retryFunc, + Timeout: timeout, + PollInterval: pollInterval, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigTableRetryableError}, + }) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +func generateBigtableGCPolicy(d *schema.ResourceData) (bigtable.GCPolicy, error) { + var policies []bigtable.GCPolicy + mode := d.Get("mode").(string) + ma, aok := d.GetOk("max_age") + mv, vok := d.GetOk("max_version") + gcRules, gok := d.GetOk("gc_rules") + + if !aok && !vok && !gok { + return bigtable.NoGcPolicy(), nil + } + + if mode == "" && aok && vok { + return nil, fmt.Errorf("if multiple policies are set, mode can't be empty") + } + + if gok { + var topLevelPolicy map[string]interface{} + if err := json.Unmarshal([]byte(gcRules.(string)), &topLevelPolicy); err != nil { + return nil, err + } + return getGCPolicyFromJSON(topLevelPolicy /*isTopLevel=*/, true) + } + + if aok { + l, _ := ma.([]interface{}) + d, err := getMaxAgeDuration(l[0].(map[string]interface{})) + if err != nil { + return nil, err + } + + policies = append(policies, bigtable.MaxAgePolicy(d)) + } + + if vok { + l, _ := mv.([]interface{}) + n, _ := l[0].(map[string]interface{})["number"].(int) + + policies = append(policies, bigtable.MaxVersionsPolicy(n)) + } + + switch mode { + case GCPolicyModeUnion: + return bigtable.UnionPolicy(policies...), nil + case GCPolicyModeIntersection: + return bigtable.IntersectionPolicy(policies...), nil + } + + return policies[0], nil +} + +func getGCPolicyFromJSON(inputPolicy map[string]interface{}, isTopLevel bool) (bigtable.GCPolicy, error) { + policy := []bigtable.GCPolicy{} + + if err := validateNestedPolicy(inputPolicy, isTopLevel); err != nil { + return nil, err + } + + for _, p := range inputPolicy["rules"].([]interface{}) { + childPolicy := p.(map[string]interface{}) + if err := validateNestedPolicy(childPolicy /*isTopLevel=*/, false); err != nil { + return nil, err + } + + if childPolicy["max_age"] != nil { + maxAge := childPolicy["max_age"].(string) + duration, err := time.ParseDuration(maxAge) + if err != nil { + return nil, fmt.Errorf("invalid duration string: %v", maxAge) + } + policy = append(policy, bigtable.MaxAgePolicy(duration)) + } + + if childPolicy["max_version"] != nil { + version := childPolicy["max_version"].(float64) + policy = append(policy, bigtable.MaxVersionsPolicy(int(version))) + } + + if childPolicy["mode"] != nil { + n, err := getGCPolicyFromJSON(childPolicy /*isTopLevel=*/, false) + if err != nil { + return nil, err + } + policy = append(policy, n) + } + } + + switch inputPolicy["mode"] { + case strings.ToLower(GCPolicyModeUnion): + return bigtable.UnionPolicy(policy...), nil + case strings.ToLower(GCPolicyModeIntersection): + return bigtable.IntersectionPolicy(policy...), nil + default: + return policy[0], nil + } +} + +func validateNestedPolicy(p map[string]interface{}, isTopLevel bool) error { + if len(p) > 2 { + return fmt.Errorf("rules has more than 2 fields") + } + maxVersion, maxVersionOk := p["max_version"] + maxAge, maxAgeOk := p["max_age"] + rulesObj, rulesOk := p["rules"] + + _, modeOk := p["mode"] + rules, arrOk := rulesObj.([]interface{}) + _, vCastOk := maxVersion.(float64) + _, aCastOk := maxAge.(string) + + if rulesOk && !arrOk { + return fmt.Errorf("`rules` must be array") + } + + if modeOk && len(rules) < 2 { + return 
fmt.Errorf("`rules` needs at least 2 GC rules when mode is specified")
+    }
+
+    if isTopLevel && !rulesOk {
+        return fmt.Errorf("invalid nested policy, need `rules`")
+    }
+
+    if isTopLevel && !modeOk && len(rules) != 1 {
+        return fmt.Errorf("when `mode` is not specified, `rules` can only have 1 child rule")
+    }
+
+    if !isTopLevel && len(p) == 2 && (!modeOk || !rulesOk) {
+        return fmt.Errorf("need `mode` and `rules` for child nested policies")
+    }
+
+    if !isTopLevel && len(p) == 1 && !maxVersionOk && !maxAgeOk {
+        return fmt.Errorf("need `max_version` or `max_age` for the rule")
+    }
+
+    if maxVersionOk && !vCastOk {
+        return fmt.Errorf("`max_version` must be a number")
+    }
+
+    if maxAgeOk && !aCastOk {
+        return fmt.Errorf("`max_age` must be a string")
+    }
+
+    return nil
+}
+
+func getMaxAgeDuration(values map[string]interface{}) (time.Duration, error) {
+    d := values["duration"].(string)
+    if d != "" {
+        return time.ParseDuration(d)
+    }
+
+    days := values["days"].(int)
+
+    return time.Hour * 24 * time.Duration(days), nil
+}
diff --git a/google/services/bigtable/resource_bigtable_gc_policy_internal_test.go b/google/services/bigtable/resource_bigtable_gc_policy_internal_test.go
new file mode 100644
index 00000000000..45f60412ef3
--- /dev/null
+++ b/google/services/bigtable/resource_bigtable_gc_policy_internal_test.go
@@ -0,0 +1,191 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+package bigtable
+
+import (
+    "encoding/json"
+    "testing"
+
+    "github.com/hashicorp/terraform-provider-google/google/tpgresource"
+)
+
+func TestUnitBigtableGCPolicy_customizeDiff(t *testing.T) {
+    for _, tc := range testUnitBigtableGCPolicyCustomizeDiffTestcases {
+        tc.check(t)
+    }
+}
+
+func (testcase *testUnitBigtableGCPolicyCustomizeDiffTestcase) check(t *testing.T) {
+    d := &tpgresource.ResourceDiffMock{
+        Before: map[string]interface{}{},
+        After:  map[string]interface{}{},
+    }
+
+    d.Before["max_age.0.days"] = testcase.oldDays
+    d.Before["max_age.0.duration"] = testcase.oldDuration
+
+    d.After["max_age.#"] = testcase.arraySize
+    d.After["max_age.0.days"] = testcase.newDays
+    d.After["max_age.0.duration"] = testcase.newDuration
+
+    err := resourceBigtableGCPolicyCustomizeDiffFunc(d)
+    if err != nil {
+        t.Errorf("error on testcase %s - %v", testcase.testName, err)
+    }
+
+    var cleared bool = d.Cleared != nil && d.Cleared["max_age.0.duration"] == true && d.Cleared["max_age.0.days"] == true
+    if cleared != testcase.cleared {
+        t.Errorf("%s: expected diff clear to be %v, but was %v", testcase.testName, testcase.cleared, cleared)
+    }
+}
+
+type testUnitBigtableGCPolicyJSONRules struct {
+    name          string
+    gcJSONString  string
+    want          string
+    errorExpected bool
+}
+
+var testUnitBigtableGCPolicyRulesTestCases = []testUnitBigtableGCPolicyJSONRules{
+    {
+        name:          "Simple policy",
+        gcJSONString:  `{"rules":[{"max_age":"10h"}]}`,
+        want:          "age() > 10h",
+        errorExpected: false,
+    },
+    {
+        name:          "Simple multiple policies",
+        gcJSONString:  `{"mode":"union", "rules":[{"max_age":"10h"},{"max_version":2}]}`,
+        want:          "(age() > 10h || versions() > 2)",
+        errorExpected: false,
+    },
+    {
+        name:          "Nested policy",
+        gcJSONString:  `{"mode":"union", "rules":[{"max_age":"10h"},{"mode": "intersection", "rules":[{"max_age":"2h"}, {"max_version":2}]}]}`,
+        want:          "(age() > 10h || (age() > 2h && versions() > 2))",
+        errorExpected: false,
+    },
+    {
+        name:          "JSON with no `rules`",
+        gcJSONString:  `{"mode": "union"}`,
+        errorExpected: true,
+    },
+    {
+        name:          "Empty JSON",
+        gcJSONString:  "{}",
+        errorExpected: true,
+    },
+    {
+        name:
"Invalid duration string", + errorExpected: true, + gcJSONString: `{"mode":"union","rules":[{"max_age":"12o"},{"max_version":2}]}`, + }, + { + name: "Empty mode policy with more than 1 rules", + gcJSONString: `{"rules":[{"max_age":"10h"}, {"max_version":2}]}`, + errorExpected: true, + }, + { + name: "Less than 2 rules with mode specified", + gcJSONString: `{"mode":"union", "rules":[{"max_version":2}]}`, + errorExpected: true, + }, + { + name: "Invalid GC rule object", + gcJSONString: `{"mode": "union", "rules": [{"mode": "intersection"}]}`, + errorExpected: true, + }, + { + name: "Invalid GC rule field: not max_version or max_age", + gcJSONString: `{"mode": "union", "rules": [{"max_versions": 2}]}`, + errorExpected: true, + }, + { + name: "Invalid GC rule field: additional fields", + gcJSONString: `{"mode": "union", "rules": [{"max_age": "10h", "something_else": 100}]}`, + errorExpected: true, + }, + { + name: "Invalid GC rule field: more than 2 fields in a gc rule object", + gcJSONString: `{"mode": "union", "rules": [{"max_age": "10h", "max_version": 10, "something": 100}]}`, + errorExpected: true, + }, + { + name: "Invalid GC rule field: max_version or max_age is in the wrong type", + gcJSONString: `{"mode": "union", "rules": [{"max_age": "10d", "max_version": 2}]}`, + errorExpected: true, + }, + { + name: "Invalid GC rule: wrong data type for child gc_rule", + gcJSONString: `{"rules": {"max_version": "456"}}`, + errorExpected: true, + }, +} + +func TestUnitBigtableGCPolicy_getGCPolicyFromJSON(t *testing.T) { + for _, tc := range testUnitBigtableGCPolicyRulesTestCases { + t.Run(tc.name, func(t *testing.T) { + var topLevelPolicy map[string]interface{} + err := json.Unmarshal([]byte(tc.gcJSONString), &topLevelPolicy) + if err != nil { + t.Fatalf("error unmarshalling JSON string: %v", err) + } + got, err := getGCPolicyFromJSON(topLevelPolicy /*isTopLevel=*/, true) + if tc.errorExpected && err == nil { + t.Fatal("expect error, got nil") + } else if !tc.errorExpected && err != nil { + t.Fatalf("unexpected error: %v", err) + } else { + if got != nil && got.String() != tc.want { + t.Errorf("error getting policy from JSON, got: %v, want: %v", got, tc.want) + } + } + }) + } +} + +type testUnitBigtableGCPolicyCustomizeDiffTestcase struct { + testName string + arraySize int + oldDays int + newDays int + oldDuration string + newDuration string + cleared bool +} + +var testUnitBigtableGCPolicyCustomizeDiffTestcases = []testUnitBigtableGCPolicyCustomizeDiffTestcase{ + { + testName: "ArraySize0", + arraySize: 0, + cleared: false, + }, + { + testName: "DaysChange", + arraySize: 1, + oldDays: 3, + newDays: 2, + cleared: false, + }, + { + testName: "DurationChanges", + arraySize: 1, + oldDuration: "3h", + newDuration: "4h", + cleared: false, + }, + { + testName: "DaysToDurationEq", + arraySize: 1, + oldDays: 3, + newDuration: "72h", + cleared: true, + }, + { + testName: "DaysToDurationNotEq", + arraySize: 1, + oldDays: 3, + newDuration: "70h", + cleared: false, + }, +} diff --git a/google/resource_bigtable_instance.go b/google/services/bigtable/resource_bigtable_instance.go similarity index 99% rename from google/resource_bigtable_instance.go rename to google/services/bigtable/resource_bigtable_instance.go index f864ebe2550..349f62116d9 100644 --- a/google/resource_bigtable_instance.go +++ b/google/services/bigtable/resource_bigtable_instance.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package bigtable import ( "context" diff --git a/google/resource_bigtable_instance_migrate.go b/google/services/bigtable/resource_bigtable_instance_migrate.go similarity index 99% rename from google/resource_bigtable_instance_migrate.go rename to google/services/bigtable/resource_bigtable_instance_migrate.go index d5f06c7b2c0..f798f5075ee 100644 --- a/google/resource_bigtable_instance_migrate.go +++ b/google/services/bigtable/resource_bigtable_instance_migrate.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package bigtable import ( "context" diff --git a/google/services/bigtable/resource_bigtable_table.go b/google/services/bigtable/resource_bigtable_table.go new file mode 100644 index 00000000000..0364cdd276a --- /dev/null +++ b/google/services/bigtable/resource_bigtable_table.go @@ -0,0 +1,362 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigtable + +import ( + "context" + "fmt" + "log" + "time" + + "cloud.google.com/go/bigtable" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBigtableTable() *schema.Resource { + return &schema.Resource{ + Create: resourceBigtableTableCreate, + Read: resourceBigtableTableRead, + Update: resourceBigtableTableUpdate, + Delete: resourceBigtableTableDestroy, + + Importer: &schema.ResourceImporter{ + State: resourceBigtableTableImport, + }, + + // Set a longer timeout for table creation as adding column families can be slow. + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + }, + + // ---------------------------------------------------------------------- + // IMPORTANT: Do not add any additional ForceNew fields to this resource. + // Destroying/recreating tables can lead to data loss for users. + // ---------------------------------------------------------------------- + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the table. Must be 1-50 characters and must only contain hyphens, underscores, periods, letters and numbers.`, + }, + + "column_family": { + Type: schema.TypeSet, + Optional: true, + Description: `A group of columns within a table which share a common configuration. This can be specified multiple times.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "family": { + Type: schema.TypeString, + Required: true, + Description: `The name of the column family.`, + }, + }, + }, + }, + + "instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The name of the Bigtable instance.`, + }, + + "split_keys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A list of predefined keys to split the table on. 
!> Warning: Modifying the split_keys of an existing table will cause Terraform to delete/recreate the entire google_bigtable_table resource.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "deletion_protection": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"PROTECTED", "UNPROTECTED"}, false), + Description: `A field to make the table protected against data loss i.e. when set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table would be prohibited. If not provided, currently deletion protection will be set to UNPROTECTED as it is the API default value.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + if err := d.Set("instance_name", instanceName); err != nil { + return fmt.Errorf("Error setting instance_name: %s", err) + } + + defer c.Close() + + tableId := d.Get("name").(string) + tblConf := bigtable.TableConf{TableID: tableId} + + // Check if deletion protection is given + // If not given, currently tblConf.DeletionProtection will be set to false in the API + deletionProtection := d.Get("deletion_protection") + if deletionProtection == "PROTECTED" { + tblConf.DeletionProtection = bigtable.Protected + } else if deletionProtection == "UNPROTECTED" { + tblConf.DeletionProtection = bigtable.Unprotected + } + + // Set the split keys if given. + if v, ok := d.GetOk("split_keys"); ok { + tblConf.SplitKeys = tpgresource.ConvertStringArr(v.([]interface{})) + } + + // Set the column families if given. + columnFamilies := make(map[string]bigtable.GCPolicy) + if d.Get("column_family.#").(int) > 0 { + columns := d.Get("column_family").(*schema.Set).List() + + for _, co := range columns { + column := co.(map[string]interface{}) + + if v, ok := column["family"]; ok { + // By default, there are no GC rules. + columnFamilies[v.(string)] = bigtable.NoGcPolicy() + } + } + } + tblConf.Families = columnFamilies + + // This method may return before the table's creation is complete - we may need to wait until + // it exists in the future. + // Set a longer timeout as creating table and adding column families can be pretty slow. + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) + defer cancel() // Always call cancel. + err = c.CreateTableFromConf(ctxWithTimeout, &tblConf) + if err != nil { + return fmt.Errorf("Error creating table. 
%s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceBigtableTableRead(d, meta) +} + +func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Get("name").(string) + table, err := c.TableInfo(ctx, name) + if err != nil { + if tpgresource.IsNotFoundGrpcError(err) { + log.Printf("[WARN] Removing %s because it's gone", name) + d.SetId("") + return nil + } + return err + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("column_family", FlattenColumnFamily(table.Families)); err != nil { + return fmt.Errorf("Error setting column_family: %s", err) + } + + deletionProtection := table.DeletionProtection + if deletionProtection == bigtable.Protected { + if err := d.Set("deletion_protection", "PROTECTED"); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } else if deletionProtection == bigtable.Unprotected { + if err := d.Set("deletion_protection", "UNPROTECTED"); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } else { + return fmt.Errorf("Error setting deletion_protection, it should be either PROTECTED or UNPROTECTED") + } + return nil +} + +func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. 
%s", err) + } + defer c.Close() + + o, n := d.GetChange("column_family") + oSet := o.(*schema.Set) + nSet := n.(*schema.Set) + name := d.Get("name").(string) + + // Add column families that are in new but not in old + for _, new := range nSet.Difference(oSet).List() { + column := new.(map[string]interface{}) + + if v, ok := column["family"]; ok { + log.Printf("[DEBUG] adding column family %q", v) + if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { + return fmt.Errorf("Error creating column family %q: %s", v, err) + } + } + } + + // Remove column families that are in old but not in new + for _, old := range oSet.Difference(nSet).List() { + column := old.(map[string]interface{}) + + if v, ok := column["family"]; ok { + log.Printf("[DEBUG] removing column family %q", v) + if err := c.DeleteColumnFamily(ctx, name, v.(string)); err != nil { + return fmt.Errorf("Error deleting column family %q: %s", v, err) + } + } + } + + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) + defer cancel() + if d.HasChange("deletion_protection") { + deletionProtection := d.Get("deletion_protection") + if deletionProtection == "PROTECTED" { + if err := c.UpdateTableWithDeletionProtection(ctxWithTimeout, name, bigtable.Protected); err != nil { + return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) + } + } else if deletionProtection == "UNPROTECTED" { + if err := c.UpdateTableWithDeletionProtection(ctxWithTimeout, name, bigtable.Unprotected); err != nil { + return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) + } + } + } + + return resourceBigtableTableRead(d, meta) +} + +func resourceBigtableTableDestroy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Get("name").(string) + err = c.DeleteTable(ctx, name) + if err != nil { + return fmt.Errorf("Error deleting table. 
%s", err) + } + + d.SetId("") + + return nil +} + +func FlattenColumnFamily(families []string) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(families)) + + for _, f := range families { + data := make(map[string]interface{}) + data["family"] = f + result = append(result, data) + } + + return result +} + +// TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 +func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/data_source_compute_lb_ip_ranges.go b/google/services/compute/data_source_compute_lb_ip_ranges.go similarity index 98% rename from google/data_source_compute_lb_ip_ranges.go rename to google/services/compute/data_source_compute_lb_ip_ranges.go index a61b138a9b8..8f030defaff 100644 --- a/google/data_source_compute_lb_ip_ranges.go +++ b/google/services/compute/data_source_compute_lb_ip_ranges.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_compute_network_peering.go b/google/services/compute/data_source_compute_network_peering.go similarity index 98% rename from google/data_source_compute_network_peering.go rename to google/services/compute/data_source_compute_network_peering.go index f50e43aaabd..174c5e04eb4 100644 --- a/google/data_source_compute_network_peering.go +++ b/google/services/compute/data_source_compute_network_peering.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/services/compute/data_source_google_compute_address.go b/google/services/compute/data_source_google_compute_address.go new file mode 100644 index 00000000000..6c36d08e5f8 --- /dev/null +++ b/google/services/compute/data_source_google_compute_address.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ( + computeAddressIdTemplate = "projects/%s/regions/%s/addresses/%s" + computeAddressLinkRegex = regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$") +) + +func DataSourceGoogleComputeAddress() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeAddressRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "address": { + Type: schema.TypeString, + Computed: true, + }, + + "address_type": { + Type: schema.TypeString, + Computed: true, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + + "network_tier": { + Type: schema.TypeString, + Computed: true, + }, + + "prefix_length": { + Type: schema.TypeInt, + Computed: true, + }, + + "purpose": { + Type: schema.TypeString, + Computed: true, + }, + + "subnetwork": { + Type: schema.TypeString, + Computed: true, + }, + + "users": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + address, err := config.NewComputeClient(userAgent).Addresses.Get(project, region, name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Address Not Found : %s", name)) + } + + if err := d.Set("address", address.Address); err != nil { + return fmt.Errorf("Error setting address: %s", err) + } + if err := d.Set("address_type", address.AddressType); err != nil { + return fmt.Errorf("Error setting address_type: %s", err) + } + if err := d.Set("network", address.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("network_tier", address.NetworkTier); err != nil { + return fmt.Errorf("Error setting network_tier: %s", err) + } + if err := d.Set("prefix_length", address.PrefixLength); err != nil { + return fmt.Errorf("Error setting prefix_length: %s", err) + } + if err := d.Set("purpose", address.Purpose); err != nil { + return fmt.Errorf("Error setting purpose: %s", err) + } + if err := d.Set("subnetwork", address.Subnetwork); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + if err := d.Set("status", address.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if err := d.Set("self_link", address.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return 
fmt.Errorf("Error setting region: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name)) + return nil +} + +type ComputeAddressId struct { + Project string + Region string + Name string +} + +func (s ComputeAddressId) CanonicalId() string { + return fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) +} + +func ParseComputeAddressId(id string, config *transport_tpg.Config) (*ComputeAddressId, error) { + var parts []string + if computeAddressLinkRegex.MatchString(id) { + parts = computeAddressLinkRegex.FindStringSubmatch(id) + + return &ComputeAddressId{ + Project: parts[1], + Region: parts[2], + Name: parts[3], + }, nil + } else { + parts = strings.Split(id, "/") + } + + if len(parts) == 3 { + return &ComputeAddressId{ + Project: parts[0], + Region: parts[1], + Name: parts[2], + }, nil + } else if len(parts) == 2 { + // Project is optional. + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") + } + + return &ComputeAddressId{ + Project: config.Project, + Region: parts[0], + Name: parts[1], + }, nil + } else if len(parts) == 1 { + // Project and region is optional + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") + } + if config.Region == "" { + return nil, fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") + } + + return &ComputeAddressId{ + Project: config.Project, + Region: config.Region, + Name: parts[0], + }, nil + } + + return nil, fmt.Errorf("Invalid compute address id. Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") +} diff --git a/google/data_source_google_compute_addresses.go b/google/services/compute/data_source_google_compute_addresses.go similarity index 99% rename from google/data_source_google_compute_addresses.go rename to google/services/compute/data_source_google_compute_addresses.go index 2cde66de767..a7ad6ad8d26 100644 --- a/google/data_source_google_compute_addresses.go +++ b/google/services/compute/data_source_google_compute_addresses.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "context" diff --git a/google/data_source_google_compute_default_service_account.go b/google/services/compute/data_source_google_compute_default_service_account.go similarity index 99% rename from google/data_source_google_compute_default_service_account.go rename to google/services/compute/data_source_google_compute_default_service_account.go index e1f9f370e0b..877a1b0675d 100644 --- a/google/data_source_google_compute_default_service_account.go +++ b/google/services/compute/data_source_google_compute_default_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_global_address.go b/google/services/compute/data_source_google_compute_global_address.go similarity index 99% rename from google/data_source_google_compute_global_address.go rename to google/services/compute/data_source_google_compute_global_address.go index 2178bd0a658..9762f076e90 100644 --- a/google/data_source_google_compute_global_address.go +++ b/google/services/compute/data_source_google_compute_global_address.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_image.go b/google/services/compute/data_source_google_compute_image.go similarity index 99% rename from google/data_source_google_compute_image.go rename to google/services/compute/data_source_google_compute_image.go index 0a78771eda9..ef62709f828 100644 --- a/google/data_source_google_compute_image.go +++ b/google/services/compute/data_source_google_compute_image.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_instance_serial_port.go b/google/services/compute/data_source_google_compute_instance_serial_port.go similarity index 99% rename from google/data_source_google_compute_instance_serial_port.go rename to google/services/compute/data_source_google_compute_instance_serial_port.go index 33178092444..131cd29429a 100644 --- a/google/data_source_google_compute_instance_serial_port.go +++ b/google/services/compute/data_source_google_compute_instance_serial_port.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_network.go b/google/services/compute/data_source_google_compute_network.go similarity index 99% rename from google/data_source_google_compute_network.go rename to google/services/compute/data_source_google_compute_network.go index 33bea435f0a..bc317f9d06f 100644 --- a/google/data_source_google_compute_network.go +++ b/google/services/compute/data_source_google_compute_network.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_node_types.go b/google/services/compute/data_source_google_compute_node_types.go similarity index 99% rename from google/data_source_google_compute_node_types.go rename to google/services/compute/data_source_google_compute_node_types.go index 28bf6cef304..6d5dd9b4baf 100644 --- a/google/data_source_google_compute_node_types.go +++ b/google/services/compute/data_source_google_compute_node_types.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_regions.go b/google/services/compute/data_source_google_compute_regions.go similarity index 99% rename from google/data_source_google_compute_regions.go rename to google/services/compute/data_source_google_compute_regions.go index ce7672264be..f73c4a7e47b 100644 --- a/google/data_source_google_compute_regions.go +++ b/google/services/compute/data_source_google_compute_regions.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_subnetwork.go b/google/services/compute/data_source_google_compute_subnetwork.go similarity index 99% rename from google/data_source_google_compute_subnetwork.go rename to google/services/compute/data_source_google_compute_subnetwork.go index 2b6f6bfeaff..e94b5ecf1f4 100644 --- a/google/data_source_google_compute_subnetwork.go +++ b/google/services/compute/data_source_google_compute_subnetwork.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_vpn_gateway.go b/google/services/compute/data_source_google_compute_vpn_gateway.go similarity index 99% rename from google/data_source_google_compute_vpn_gateway.go rename to google/services/compute/data_source_google_compute_vpn_gateway.go index 6972bdd0b53..3fc6d855490 100644 --- a/google/data_source_google_compute_vpn_gateway.go +++ b/google/services/compute/data_source_google_compute_vpn_gateway.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/data_source_google_compute_zones.go b/google/services/compute/data_source_google_compute_zones.go similarity index 99% rename from google/data_source_google_compute_zones.go rename to google/services/compute/data_source_google_compute_zones.go index c3427ff6ebe..97b8dfae1e0 100644 --- a/google/data_source_google_compute_zones.go +++ b/google/services/compute/data_source_google_compute_zones.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/services/compute/resource_compute_attached_disk.go b/google/services/compute/resource_compute_attached_disk.go new file mode 100644 index 00000000000..7e5b610c2cd --- /dev/null +++ b/google/services/compute/resource_compute_attached_disk.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "google.golang.org/api/compute/v1" +) + +func ResourceComputeAttachedDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceAttachedDiskCreate, + Read: resourceAttachedDiskRead, + Delete: resourceAttachedDiskDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAttachedDiskImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(300 * time.Second), + Delete: schema.DefaultTimeout(300 * time.Second), + }, + + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `name or self_link of the disk that will be attached.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `name or self_link of the compute instance that the disk will be attached to. If the self_link is provided then zone and project are extracted from the self link. If only the name is used then zone and project must be defined as properties on the resource or provider.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "project": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + Description: `The project that the referenced compute instance is a part of. If instance is referenced by its self_link the project defined in the link will take precedence.`, + }, + "zone": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + Description: `The zone that the referenced compute instance is located within. 
If instance is referenced by its self_link the zone defined in the link will take precedence.`, + }, + "device_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: `Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine.`, + }, + "mode": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: "READ_WRITE", + Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.`, + ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false), + }, + }, + UseJSONNumber: true, + } +} + +func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + + disk := d.Get("disk").(string) + diskName := tpgresource.GetResourceNameFromSelfLink(disk) + diskSrc := fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName) + + // Check if the disk is a regional disk + if strings.Contains(disk, "regions") { + rv, err := tpgresource.ParseRegionDiskFieldValue(disk, d, config) + if err != nil { + return err + } + diskSrc = rv.RelativeLink() + } + + attachedDisk := compute.AttachedDisk{ + Source: diskSrc, + Mode: d.Get("mode").(string), + DeviceName: d.Get("device_name").(string), + } + + op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do() + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName)) + + waitErr := ComputeOperationWaitTime(config, op, zv.Project, + "disk to attach", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + d.SetId("") + return waitErr + } + + return resourceAttachedDiskRead(d, meta) +} + +func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + if err := d.Set("project", zv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", zv.Zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + + diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) + + instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AttachedDisk %q", d.Id())) + } + + // Iterate through the instance's attached disks as this is the only way to + // confirm the disk is actually attached + ad := 
FindDiskByName(instance.Disks, diskName) + if ad == nil { + log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.") + d.SetId("") + return nil + } + + if err := d.Set("device_name", ad.DeviceName); err != nil { + return fmt.Errorf("Error setting device_name: %s", err) + } + if err := d.Set("mode", ad.Mode); err != nil { + return fmt.Errorf("Error setting mode: %s", err) + } + + // Force the referenced resources to a self-link in state because it's more specific than the name. + instancePath, err := tpgresource.GetRelativePath(instance.SelfLink) + if err != nil { + return err + } + if err := d.Set("instance", instancePath); err != nil { + return fmt.Errorf("Error setting instance: %s", err) + } + diskPath, err := tpgresource.GetRelativePath(ad.Source) + if err != nil { + return err + } + if err := d.Set("disk", diskPath); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } + + return nil +}
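A brief sketch (hypothetical disk values, not part of this diff) of the matching rule FindDiskByName, defined at the end of this file, applies in both the read and delete paths, which is why a bare name and a full self_link both resolve to the same attached disk:

// Sketch only: CompareSelfLinkOrResourceName treats a self_link and a
// bare resource name as equal when the trailing name segment matches.
src := "https://www.googleapis.com/compute/v1/projects/p/zones/z/disks/my-disk"
same := tpgresource.CompareSelfLinkOrResourceName("", src, "my-disk", nil) // true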
+ +func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + + diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) + + instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return err + } + + // Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an attached + // disk on the compute instance then return as though the delete call succeeded since this is the desired state. + ad := FindDiskByName(instance.Disks, diskName) + if ad == nil { + return nil + } + + op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do() + if err != nil { + return err + } + + waitErr := ComputeOperationWaitTime(config, op, zv.Project, + fmt.Sprintf("Detaching disk from %s", zv.Name), userAgent, d.Timeout(schema.TimeoutDelete)) + if waitErr != nil { + return waitErr + } + + return nil +} + +func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + err := tpgresource.ParseImportId( + []string{"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/instances/(?P<instance>[^/]+)/(?P<disk>[^/]+)", + "(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<instance>[^/]+)/(?P<disk>[^/]+)"}, d, config) + if err != nil { + return nil, err + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") + if err != nil { + return nil, err + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func FindDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk { + for _, disk := range disks { + if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, id, nil) { + return disk + } + } + + return nil +} diff --git a/google/resource_compute_disk_async_replication.go b/google/services/compute/resource_compute_disk_async_replication.go similarity index 81% rename from google/resource_compute_disk_async_replication.go rename to google/services/compute/resource_compute_disk_async_replication.go index 9821d4cf9b2..8db869f0659 100644 --- a/google/resource_compute_disk_async_replication.go +++ b/google/services/compute/resource_compute_disk_async_replication.go @@ -1,3 +1,3 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute diff --git a/google/resource_compute_network_peering.go b/google/services/compute/resource_compute_network_peering.go similarity index 99% rename from google/resource_compute_network_peering.go rename to google/services/compute/resource_compute_network_peering.go index 1956239c19e..cf598da404d 100644 --- a/google/resource_compute_network_peering.go +++ b/google/services/compute/resource_compute_network_peering.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/resource_compute_project_default_network_tier.go b/google/services/compute/resource_compute_project_default_network_tier.go similarity index 99% rename from google/resource_compute_project_default_network_tier.go rename to google/services/compute/resource_compute_project_default_network_tier.go index e3ab5087264..f59bd43d1c6 100644 --- a/google/resource_compute_project_default_network_tier.go +++ b/google/services/compute/resource_compute_project_default_network_tier.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/resource_compute_security_policy.go b/google/services/compute/resource_compute_security_policy.go similarity index 99% rename from google/resource_compute_security_policy.go rename to google/services/compute/resource_compute_security_policy.go index 45dc4c09be6..3dc0a3ce377 100644 --- a/google/resource_compute_security_policy.go +++ b/google/services/compute/resource_compute_security_policy.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "context" diff --git a/google/resource_compute_shared_vpc_host_project.go b/google/services/compute/resource_compute_shared_vpc_host_project.go similarity index 99% rename from google/resource_compute_shared_vpc_host_project.go rename to google/services/compute/resource_compute_shared_vpc_host_project.go index 234cca8835a..41c23e1b1ec 100644 --- a/google/resource_compute_shared_vpc_host_project.go +++ b/google/services/compute/resource_compute_shared_vpc_host_project.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/resource_compute_shared_vpc_service_project.go b/google/services/compute/resource_compute_shared_vpc_service_project.go similarity index 99% rename from google/resource_compute_shared_vpc_service_project.go rename to google/services/compute/resource_compute_shared_vpc_service_project.go index 956c5592836..e9e9a8139ec 100644 --- a/google/resource_compute_shared_vpc_service_project.go +++ b/google/services/compute/resource_compute_shared_vpc_service_project.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/resource_compute_target_pool.go b/google/services/compute/resource_compute_target_pool.go similarity index 99% rename from google/resource_compute_target_pool.go rename to google/services/compute/resource_compute_target_pool.go index 371a4a72828..98f320765b3 100644 --- a/google/resource_compute_target_pool.go +++ b/google/services/compute/resource_compute_target_pool.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" diff --git a/google/tpgresource/utils.go b/google/tpgresource/utils.go index a78c14bf1ac..ef1cb067619 100644 --- a/google/tpgresource/utils.go +++ b/google/tpgresource/utils.go @@ -3,8 +3,12 @@ package tpgresource import ( + "crypto/md5" + "encoding/base64" "errors" "fmt" + "io/ioutil" + "log" "net/url" "reflect" "regexp" @@ -677,3 +681,20 @@ func BuildReplacementFunc(re *regexp.Regexp, d TerraformResourceData, config *tr return f, nil } + +func GetFileMd5Hash(filename string) string { + data, err := ioutil.ReadFile(filename) + if err != nil { + log.Printf("[WARN] Failed to read source file %q. Cannot compute md5 hash for it.", filename) + return "" + } + return GetContentMd5Hash(data) +} + +func GetContentMd5Hash(content []byte) string { + h := md5.New() + if _, err := h.Write(content); err != nil { + log.Printf("[WARN] Failed to compute md5 hash for content: %v", err) + } + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +}
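Finally, a short usage sketch for the new hash helpers in tpgresource (hypothetical inputs, not part of this diff); both return a base64-encoded MD5 digest, and GetFileMd5Hash logs a warning and returns an empty string when the file cannot be read:

// Sketch only.
sum := tpgresource.GetContentMd5Hash([]byte("example content"))
fileSum := tpgresource.GetFileMd5Hash("/tmp/example.txt") // "" if unreadable
log.Printf("content md5: %s, file md5: %s", sum, fileSum)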