From f1a8b7c2bd2272b6aa69f165b1b5d4f9ee5e7433 Mon Sep 17 00:00:00 2001 From: The Magician Date: Tue, 20 Jun 2023 13:08:08 -0700 Subject: [PATCH] Move handwritten resources to the service packages (#8147) (#14940) * Move handwritten resources to the service packages * Fix import path Signed-off-by: Modular Magician --- .changelog/8147.txt | 3 + ...a_source_storage_object_signed_url_test.go | 87 --- google/iam_dataproc_cluster.go | 131 +--- google/iam_dataproc_job.go | 149 +--- google/logging_exclusion_billing_account.go | 101 +-- google/logging_exclusion_folder.go | 101 +-- google/logging_exclusion_organization.go | 101 +-- google/logging_exclusion_project.go | 108 +-- google/logging_utils.go | 53 +- google/provider.go | 109 +-- google/resource_dataflow_job.go | 663 +---------------- google/resource_dataflow_job_test.go | 9 +- google/resource_dataproc_cluster_test.go | 116 --- google/resource_endpoints_service_test.go | 53 -- google/resource_logging_exclusion.go | 303 +------- .../resource_logging_folder_exclusion_test.go | 7 +- google/resource_logging_project_sink_test.go | 57 -- google/resource_sql_database_instance_test.go | 33 - google/resource_sql_database_test.go | 34 - google/resource_storage_bucket_acl.go | 398 +---------- google/resource_storage_bucket_acl_test.go | 5 +- google/resource_storage_bucket_test.go | 79 --- ...esource_storage_default_object_acl_test.go | 5 +- google/resource_storage_notification.go | 191 +---- google/resource_storage_notification_test.go | 5 +- google/resource_storage_object_acl_test.go | 5 +- .../compute}/resource_usage_export_bucket.go | 7 +- .../resource_dataflow_flex_template_job.go | 2 +- .../dataflow/resource_dataflow_job.go | 668 ++++++++++++++++++ .../services/dataproc/iam_dataproc_cluster.go | 144 ++++ google/services/dataproc/iam_dataproc_job.go | 162 +++++ .../dataproc}/resource_dataproc_cluster.go | 8 +- ...resource_dataproc_cluster_internal_test.go | 123 ++++ .../dataproc}/resource_dataproc_job.go | 30 +- .../resource_dialogflow_cx_environment.go | 2 +- .../resource_dialogflow_cx_version.go | 2 +- ...ata_source_google_kms_secret_asymmetric.go | 2 +- ...ce_google_logging_project_cmek_settings.go | 2 +- .../data_source_google_logging_sink.go | 2 +- .../logging_exclusion_billing_account.go | 109 +++ .../logging/logging_exclusion_folder.go | 110 +++ .../logging/logging_exclusion_organization.go | 109 +++ .../logging/logging_exclusion_project.go | 116 +++ google/services/logging/logging_utils.go | 62 ++ .../logging}/logging_utils_test.go | 4 +- ...e_logging_billing_account_bucket_config.go | 2 +- .../resource_logging_billing_account_sink.go | 2 +- .../resource_logging_bucket_config.go | 8 +- .../logging/resource_logging_exclusion.go | 307 ++++++++ .../resource_logging_folder_bucket_config.go | 2 +- .../logging}/resource_logging_folder_sink.go | 2 +- ...urce_logging_organization_bucket_config.go | 2 +- .../resource_logging_organization_sink.go | 2 +- .../resource_logging_project_bucket_config.go | 2 +- .../logging}/resource_logging_project_sink.go | 2 +- ...urce_logging_project_sink_internal_test.go | 65 ++ .../logging}/resource_logging_sink.go | 4 +- ...urce_google_monitoring_uptime_check_ips.go | 2 +- .../resource_monitoring_dashboard.go | 2 +- ...ta_source_secret_manager_secret_version.go | 2 +- ...ce_secret_manager_secret_version_access.go | 2 +- .../resource_endpoints_service.go | 2 +- ...esource_endpoints_service_internal_test.go | 63 ++ .../resource_endpoints_service_migration.go | 2 +- .../sql}/data_source_google_sql_ca_certs.go | 2 +- 
.../sql}/data_source_google_sql_tiers.go | 2 +- .../sql}/data_source_sql_backup_run.go | 2 +- .../sql}/data_source_sql_database_instance.go | 2 +- .../data_source_sql_database_instances.go | 2 +- .../sql}/resource_sql_database_instance.go | 2 +- ...rce_sql_database_instance_internal_test.go | 40 ++ .../resource_sql_database_internal_test.go | 41 ++ .../sql}/resource_sql_ssl_cert.go | 2 +- .../{ => services/sql}/resource_sql_user.go | 2 +- .../sql}/resource_sql_user_migrate.go | 2 +- .../sql}/resource_sql_user_migrate_test.go | 2 +- .../data_source_google_storage_bucket.go | 2 +- ...ata_source_google_storage_bucket_object.go | 2 +- ..._google_storage_project_service_account.go | 2 +- ...ta_source_storage_bucket_object_content.go | 2 +- .../data_source_storage_object_signed_url.go | 2 +- ...storage_object_signed_url_internal_test.go | 96 +++ .../storage}/resource_storage_bucket.go | 4 +- .../storage/resource_storage_bucket_acl.go | 403 +++++++++++ .../resource_storage_bucket_internal_test.go | 86 +++ .../resource_storage_bucket_object.go | 2 +- .../resource_storage_default_object_acl.go | 2 +- .../storage/resource_storage_notification.go | 198 ++++++ .../storage}/resource_storage_object_acl.go | 2 +- ...torage_transfer_project_service_account.go | 2 +- .../resource_storage_transfer_job.go | 2 +- .../tags}/data_source_tags_tag_key.go | 2 +- .../tags}/data_source_tags_tag_value.go | 2 +- .../resource_tags_location_tag_bindings.go | 2 +- .../data_source_tpu_tensorflow_versions.go | 2 +- 95 files changed, 3103 insertions(+), 2858 deletions(-) create mode 100644 .changelog/8147.txt rename google/{ => services/compute}/resource_usage_export_bucket.go (93%) rename google/{ => services/dataflow}/resource_dataflow_flex_template_job.go (80%) create mode 100644 google/services/dataflow/resource_dataflow_job.go create mode 100644 google/services/dataproc/iam_dataproc_cluster.go create mode 100644 google/services/dataproc/iam_dataproc_job.go rename google/{ => services/dataproc}/resource_dataproc_cluster.go (99%) create mode 100644 google/services/dataproc/resource_dataproc_cluster_internal_test.go rename google/{ => services/dataproc}/resource_dataproc_job.go (97%) rename google/{ => services/dialogflowcx}/resource_dialogflow_cx_environment.go (99%) rename google/{ => services/dialogflowcx}/resource_dialogflow_cx_version.go (99%) rename google/{ => services/kms}/data_source_google_kms_secret_asymmetric.go (82%) rename google/{ => services/logging}/data_source_google_logging_project_cmek_settings.go (99%) rename google/{ => services/logging}/data_source_google_logging_sink.go (98%) create mode 100644 google/services/logging/logging_exclusion_billing_account.go create mode 100644 google/services/logging/logging_exclusion_folder.go create mode 100644 google/services/logging/logging_exclusion_organization.go create mode 100644 google/services/logging/logging_exclusion_project.go create mode 100644 google/services/logging/logging_utils.go rename google/{ => services/logging}/logging_utils_test.go (96%) rename google/{ => services/logging}/resource_logging_billing_account_bucket_config.go (98%) rename google/{ => services/logging}/resource_logging_billing_account_sink.go (99%) rename google/{ => services/logging}/resource_logging_bucket_config.go (98%) create mode 100644 google/services/logging/resource_logging_exclusion.go rename google/{ => services/logging}/resource_logging_folder_bucket_config.go (98%) rename google/{ => services/logging}/resource_logging_folder_sink.go (99%) rename google/{ => 
services/logging}/resource_logging_organization_bucket_config.go (98%) rename google/{ => services/logging}/resource_logging_organization_sink.go (99%) rename google/{ => services/logging}/resource_logging_project_bucket_config.go (99%) rename google/{ => services/logging}/resource_logging_project_sink.go (99%) create mode 100644 google/services/logging/resource_logging_project_sink_internal_test.go rename google/{ => services/logging}/resource_logging_sink.go (99%) rename google/{ => services/monitoring}/data_source_google_monitoring_uptime_check_ips.go (99%) rename google/{ => services/monitoring}/resource_monitoring_dashboard.go (99%) rename google/{ => services/secretmanager}/data_source_secret_manager_secret_version.go (99%) rename google/{ => services/secretmanager}/data_source_secret_manager_secret_version_access.go (99%) rename google/{ => services/servicemanagement}/resource_endpoints_service.go (99%) create mode 100644 google/services/servicemanagement/resource_endpoints_service_internal_test.go rename google/{ => services/servicemanagement}/resource_endpoints_service_migration.go (96%) rename google/{ => services/sql}/data_source_google_sql_ca_certs.go (99%) rename google/{ => services/sql}/data_source_google_sql_tiers.go (99%) rename google/{ => services/sql}/data_source_sql_backup_run.go (99%) rename google/{ => services/sql}/data_source_sql_database_instance.go (97%) rename google/{ => services/sql}/data_source_sql_database_instances.go (99%) rename google/{ => services/sql}/resource_sql_database_instance.go (99%) create mode 100644 google/services/sql/resource_sql_database_instance_internal_test.go create mode 100644 google/services/sql/resource_sql_database_internal_test.go rename google/{ => services/sql}/resource_sql_ssl_cert.go (99%) rename google/{ => services/sql}/resource_sql_user.go (99%) rename google/{ => services/sql}/resource_sql_user_migrate.go (98%) rename google/{ => services/sql}/resource_sql_user_migrate_test.go (99%) rename google/{ => services/storage}/data_source_google_storage_bucket.go (98%) rename google/{ => services/storage}/data_source_google_storage_bucket_object.go (99%) rename google/{ => services/storage}/data_source_google_storage_project_service_account.go (99%) rename google/{ => services/storage}/data_source_storage_bucket_object_content.go (99%) rename google/{ => services/storage}/data_source_storage_object_signed_url.go (99%) create mode 100644 google/services/storage/data_source_storage_object_signed_url_internal_test.go rename google/{ => services/storage}/resource_storage_bucket.go (99%) create mode 100644 google/services/storage/resource_storage_bucket_acl.go create mode 100644 google/services/storage/resource_storage_bucket_internal_test.go rename google/{ => services/storage}/resource_storage_bucket_object.go (99%) rename google/{ => services/storage}/resource_storage_default_object_acl.go (99%) create mode 100644 google/services/storage/resource_storage_notification.go rename google/{ => services/storage}/resource_storage_object_acl.go (99%) rename google/{ => services/storagetransfer}/data_source_google_storage_transfer_project_service_account.go (98%) rename google/{ => services/storagetransfer}/resource_storage_transfer_job.go (99%) rename google/{ => services/tags}/data_source_tags_tag_key.go (99%) rename google/{ => services/tags}/data_source_tags_tag_value.go (99%) rename google/{ => services/tags}/resource_tags_location_tag_bindings.go (99%) rename google/{ => services/tpu}/data_source_tpu_tensorflow_versions.go (99%) diff 
--git a/.changelog/8147.txt b/.changelog/8147.txt new file mode 100644 index 00000000000..8ec013c0699 --- /dev/null +++ b/.changelog/8147.txt @@ -0,0 +1,3 @@ +```release-note:none + +``` diff --git a/google/data_source_storage_object_signed_url_test.go b/google/data_source_storage_object_signed_url_test.go index 1355ffc2cb3..6decdb95dd7 100644 --- a/google/data_source_storage_object_signed_url_test.go +++ b/google/data_source_storage_object_signed_url_test.go @@ -5,103 +5,16 @@ package google import ( "testing" - "bytes" - "encoding/base64" "fmt" "io/ioutil" "net/http" - "net/url" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" - "golang.org/x/oauth2/google" ) -const fakeCredentials = `{ - "type": "service_account", - "project_id": "gcp-project", - "private_key_id": "29a54056cee3d6886d9e8515a959af538ab5add9", - "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAsGHDAdHZfi81LgVeeMHXYLgNDpcFYhoBykYtTDdNyA5AixID\n8JdKlCmZ6qLNnZrbs4JlBJfmzw6rjUC5bVBFg5NwYVBu3+3Msa4rgLsTGsjPH9rt\nC+QFnFhcmzg3zz8eeXBqJdhw7wmn1Xa9SsC3h6YWveBk98ecyE7yGe8J8xGphjk7\nEQ/KBmRK/EJD0ZwuYW1W4Bv5f5fca7qvi9rCprEmL8//uy0qCwoJj2jU3zc5p72M\npkSZb1XlYxxTEo/h9WCEvWS9pGhy6fJ0sA2RsBHqU4Y5O7MJEei9yu5fVSZUi05f\n/ggfUID+cFEq0Z/A98whKPEBBJ/STdEaqEEkBwIDAQABAoIBAED6EsvF0dihbXbh\ntXbI+h4AT5cTXYFRUV2B0sgkC3xqe65/2YG1Sl0gojoE9bhcxxjvLWWuy/F1Vw93\nS5gQnTsmgpzm86F8yg6euhn3UMdqOJtknDToMITzLFJmOHEZsJFOL1x3ysrUhMan\nsn4qVrIbJn+WfbumBoToSFnzbHflacOh06ZRbYa2bpSPMfGGFtwqQjRadn5+pync\nlCjaupcg209sM0qEk/BDSzHvWL1VgLMdiKBx574TSwS0o569+7vPNt92Ydi7kARo\nreOzkkF4L3xNhKZnmls2eGH6A8cp1KZXoMLFuO+IwvBMA0O29LsUlKJU4PjBrf+7\nwaslnMECgYEA5bJv0L6DKZQD3RCBLue4/mDg0GHZqAhJBS6IcaXeaWeH6PgGZggV\nMGkWnULltJIYFwtaueTfjWqciAeocKx+rqoRjuDMOGgcrEf6Y+b5AqF+IjQM66Ll\nIYPUt3FCIc69z5LNEtyP4DSWsFPJ5UhAoG4QRlDTqT5q0gKHFjeLdeECgYEAxJRk\nkrsWmdmUs5NH9pyhTdEDIc59EuJ8iOqOLzU8xUw6/s2GSClopEFJeeEoIWhLuPY3\nX3bFt4ppl/ksLh05thRs4wXRxqhnokjD3IcGu3l6Gb5QZTYwb0VfN+q2tWVEE8Qc\nPQURheUsM2aP/gpJVQvNsWVmkT0Ijc3J8bR2hucCgYEAjOF4e0ueHu5NwFTTJvWx\nHTRGLwkU+l66ipcT0MCvPW7miRk2s3XZqSuLV0Ekqi/A3sF0D/g0tQPipfwsb48c\n0/wzcLKoDyCsFW7AQG315IswVcIe+peaeYfl++1XZmzrNlkPtrXY+ObIVbXOavZ5\nzOw0xyvj5jYGRnCOci33N4ECgYA91EKx2ABq0YGw3aEj0u31MMlgZ7b1KqFq2wNv\nm7oKgEiJ/hC/P673AsXefNAHeetfOKn/77aOXQ2LTEb2FiEhwNjiquDpL+ywoVxh\nT2LxsmqSEEbvHpUrWlFxn/Rpp3k7ElKjaqWxTHyTii2+BHQ+OKEwq6kQA3deSpy6\n1jz1fwKBgQDLqbdq5FA63PWqApfNVykXukg9MASIcg/0fjADFaHTPDvJjhFutxRP\nppI5Q95P12CQ/eRBZKJnRlkhkL8tfPaWPzzOpCTjID7avRhx2oLmstmYuXx0HluE\ncqXLbAV9WDpIJ3Bpa/S8tWujWhLDmixn2JeAdurWS+naH9U9e4I6Rw==\n-----END RSA PRIVATE KEY-----\n", - "client_email": "user@gcp-project.iam.gserviceaccount.com", - "client_id": "103198861025845558729", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40gcp-project.iam.gserviceaccount.com" -} -` - -// The following values are derived from the output of the `gsutil signurl` command. -// i.e. 
-// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file -// URL HTTP Method Expiration Signed URL -// gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D - -const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file" -const testUrlExpires = 1470967410 -const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" -const testUrlExpectedUrl = "https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" - -func TestUrlData_Signing(t *testing.T) { - urlData := &UrlData{ - HttpMethod: "GET", - Expires: testUrlExpires, - Path: testUrlPath, - } - // unescape and decode the expected signature - expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) - if err != nil { - t.Error(err) - } - expected, err := base64.StdEncoding.DecodeString(expectedSig) - if err != nil { - t.Error(err) - } - - // load fake service account credentials - cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") - if err != nil { - t.Error(err) - } - - // create url data signature - toSign := urlData.SigningString() - result, err := SignString(toSign, cfg) - if err != nil { - t.Error(err) - } - - // compare to expected value - if !bytes.Equal(result, expected) { - t.Errorf("Signatures do not match:\n%x\n%x\n", expected, result) - } - -} - -func TestUrlData_SignedUrl(t *testing.T) { - // load fake service account credentials - cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") - if err != nil { - t.Error(err) - } - - urlData := &UrlData{ - HttpMethod: "GET", - Expires: testUrlExpires, - Path: testUrlPath, - JwtConfig: cfg, - } - result, err := urlData.SignedUrl() - if err != nil { - t.Errorf("Could not generated signed url: %+v", err) - } - if result != testUrlExpectedUrl { - t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result) - } -} - func TestAccStorageSignedUrl_basic(t *testing.T) { t.Parallel() diff --git a/google/iam_dataproc_cluster.go b/google/iam_dataproc_cluster.go index 5d838b78f6f..13397bb8913 100644 --- a/google/iam_dataproc_cluster.go +++ b/google/iam_dataproc_cluster.go @@ -3,142 +3,19 @@ package google import ( - "fmt" - 
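The signed-URL test helpers deleted above move to `data_source_storage_object_signed_url_internal_test.go` under `services/storage`. They exercise V2-style GCS URL signing, where a newline-joined signing string is built from the verb, headers, expiry, and resource path. A minimal sketch of that string's shape, assuming a GET request with blank Content-MD5 and Content-Type headers; `signingString` is a hypothetical stand-in for the `UrlData.SigningString` method this patch relocates, not the provider's actual implementation:

```go
package main

import "fmt"

// Hypothetical sketch: verb, Content-MD5, Content-Type, expiry (unix
// seconds), and canonicalized resource path, joined by newlines. The
// MD5 and Content-Type slots are empty for a plain GET.
func signingString(method string, expires int64, path string) string {
	return fmt.Sprintf("%s\n\n\n%d\n%s", method, expires, path)
}

func main() {
	fmt.Println(signingString("GET", 1470967410, "/tf-test-bucket-6159205297736845881/path/to/file"))
}
```

The removed test then RSA-signs this string with the fake service-account key and compares the base64-encoded result against a signature captured from `gsutil signurl`.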
- "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/dataproc" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/dataproc/v1" ) -var IamDataprocClusterSchema = map[string]*schema.Schema{ - "cluster": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type DataprocClusterIamUpdater struct { - project string - region string - cluster string - d tpgresource.TerraformResourceData - Config *transport_tpg.Config -} +var IamDataprocClusterSchema = dataproc.IamDataprocClusterSchema func NewDataprocClusterUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - - return &DataprocClusterIamUpdater{ - project: project, - region: region, - cluster: d.Get("cluster").(string), - d: d, - Config: config, - }, nil + return dataproc.NewDataprocClusterUpdater(d, config) } func DataprocClusterIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { - fv, err := tpgresource.ParseRegionalFieldValue("clusters", d.Id(), "project", "region", "zone", d, config, true) - if err != nil { - return err - } - - if err := d.Set("project", fv.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", fv.Region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("cluster", fv.Name); err != nil { - return fmt.Errorf("Error setting cluster: %s", err) - } - - // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
- d.SetId(fv.RelativeLink()) - return nil -} - -func (u *DataprocClusterIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - req := &dataproc.GetIamPolicyRequest{} - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *DataprocClusterIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - req := &dataproc.SetIamPolicyRequest{Policy: dataprocPolicy} - _, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataprocClusterIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/regions/%s/clusters/%s", u.project, u.region, u.cluster) -} - -func (u *DataprocClusterIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataproc-cluster-%s-%s-%s", u.project, u.region, u.cluster) -} - -func (u *DataprocClusterIamUpdater) DescribeResource() string { - return fmt.Sprintf("Dataproc Cluster %s/%s/%s", u.project, u.region, u.cluster) + return dataproc.DataprocClusterIdParseFunc(d, config) } diff --git a/google/iam_dataproc_job.go b/google/iam_dataproc_job.go index fec780a81e5..7b2ab792b8a 100644 --- a/google/iam_dataproc_job.go +++ b/google/iam_dataproc_job.go @@ -3,160 +3,19 @@ package google import ( - "fmt" - - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/dataproc" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/dataproc/v1" ) -var IamDataprocJobSchema = map[string]*schema.Schema{ - "job_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type DataprocJobIamUpdater struct { - project string - region string - jobId string - d tpgresource.TerraformResourceData - Config *transport_tpg.Config -} +var IamDataprocJobSchema = dataproc.IamDataprocJobSchema func NewDataprocJobUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - 
- region, err := tpgresource.GetRegion(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - - return &DataprocJobIamUpdater{ - project: project, - region: region, - jobId: d.Get("job_id").(string), - d: d, - Config: config, - }, nil + return dataproc.NewDataprocJobUpdater(d, config) } func DataprocJobIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { - fv, err := tpgresource.ParseRegionalFieldValue("jobs", d.Id(), "project", "region", "zone", d, config, true) - if err != nil { - return err - } - - if err := d.Set("job_id", fv.Name); err != nil { - return fmt.Errorf("Error setting job_id: %s", err) - } - if err := d.Set("project", fv.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", fv.Region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - - // Explicitly set the id so imported resources have the same ID format as non-imported ones. - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *DataprocJobIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - req := &dataproc.GetIamPolicyRequest{} - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *DataprocJobIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - req := &dataproc.SetIamPolicyRequest{Policy: dataprocPolicy} - _, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataprocJobIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/regions/%s/jobs/%s", u.project, u.region, u.jobId) -} - -func (u *DataprocJobIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataproc-job-%s-%s-%s", u.project, u.region, u.jobId) -} - -func (u *DataprocJobIamUpdater) DescribeResource() string { - return fmt.Sprintf("Dataproc Job %s/%s/%s", u.project, u.region, u.jobId) -} - -func resourceManagerToDataprocPolicy(p *cloudresourcemanager.Policy) (*dataproc.Policy, error) { - out := &dataproc.Policy{} - err := tpgresource.Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a dataproc policy to a cloudresourcemanager policy: {{err}}", err) - } - return out, nil -} - -func dataprocToResourceManagerPolicy(p *dataproc.Policy) (*cloudresourcemanager.Policy, error) 
{ - out := &cloudresourcemanager.Policy{} - err := tpgresource.Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a dataproc policy: {{err}}", err) - } - return out, nil + return dataproc.DataprocJobIdParseFunc(d, config) } diff --git a/google/logging_exclusion_billing_account.go b/google/logging_exclusion_billing_account.go index c49dbd3b89d..d222ea8ab77 100644 --- a/google/logging_exclusion_billing_account.go +++ b/google/logging_exclusion_billing_account.go @@ -3,107 +3,18 @@ package google import ( - "fmt" - - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-provider-google/google/services/logging" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/logging/v2" ) -var BillingAccountLoggingExclusionSchema = map[string]*schema.Schema{ - "billing_account": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type BillingAccountLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *transport_tpg.Config -} - -func NewBillingAccountLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { - billingAccount := d.Get("billing_account").(string) - - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } +var BillingAccountLoggingExclusionSchema = logging.BillingAccountLoggingExclusionSchema - return &BillingAccountLoggingExclusionUpdater{ - resourceType: "billingAccounts", - resourceId: billingAccount, - userAgent: userAgent, - Config: config, - }, nil +func NewBillingAccountLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (logging.ResourceLoggingExclusionUpdater, error) { + return logging.NewBillingAccountLoggingExclusionUpdater(d, config) } func BillingAccountLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "billingAccounts" != loggingExclusionId.resourceType { - return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if err := d.Set("billing_account", loggingExclusionId.resourceId); err != nil { - return fmt.Errorf("Error setting billing_account: %s", err) - } - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Get(id).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *BillingAccountLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { - _, err := 
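The two `iam_dataproc_*.go` diffs above show the recurring shape of this refactor: the full implementation moves into a service package, and the old file in package `google` shrinks to a re-exported variable plus one-line delegating functions, so existing references keep compiling. A minimal runnable sketch of that shim pattern, using hypothetical stand-in types in place of the real Terraform plugin SDK signatures:

```go
package main

import "fmt"

// Stand-ins for the implementation that moved into services/dataproc.
var implSchema = map[string]string{"cluster": "Required"}

func implNewUpdater(project string) (string, error) {
	return "updater for " + project, nil
}

// The compatibility layer left behind in the old package reduces to this:
// a re-export keeps callers compiling, and the constructor delegates with
// no logic of its own.
var IamDataprocClusterSchema = implSchema

func NewDataprocClusterUpdater(project string) (string, error) {
	return implNewUpdater(project)
}

func main() {
	u, _ := NewDataprocClusterUpdater("my-project")
	fmt.Println(u, IamDataprocClusterSchema["cluster"])
}
```

Because the shim contains no behavior, it can later be deleted in a major release without touching the moved implementation.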
u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Delete(id).Do() - if err != nil { - return errwrap.Wrap(fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *BillingAccountLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *BillingAccountLoggingExclusionUpdater) DescribeResource() string { - return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) + return logging.BillingAccountLoggingExclusionIdParseFunc(d, nil) } diff --git a/google/logging_exclusion_folder.go b/google/logging_exclusion_folder.go index a7ebf76bff3..a38593a9e10 100644 --- a/google/logging_exclusion_folder.go +++ b/google/logging_exclusion_folder.go @@ -3,109 +3,18 @@ package google import ( - "fmt" - - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "github.com/hashicorp/terraform-provider-google/google/services/logging" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/logging/v2" ) -var FolderLoggingExclusionSchema = map[string]*schema.Schema{ - "folder": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.OptionalPrefixSuppress("folders/"), - }, -} +var FolderLoggingExclusionSchema = logging.FolderLoggingExclusionSchema -type FolderLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *transport_tpg.Config -} - -func NewFolderLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { - folder := resourcemanager.ParseFolderId(d.Get("folder")) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - return &FolderLoggingExclusionUpdater{ - resourceType: "folders", - resourceId: folder, - userAgent: userAgent, - Config: config, - }, nil +func NewFolderLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (logging.ResourceLoggingExclusionUpdater, error) { + return logging.NewFolderLoggingExclusionUpdater(d, config) } func FolderLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "folders" != loggingExclusionId.resourceType { - return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if err := d.Set("folder", loggingExclusionId.resourceId); err != nil { - return fmt.Errorf("Error setting folder: %s", err) - } - return nil -} - -func (u *FolderLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Create(parent, exclusion).Do() - if err 
!= nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Get(id).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *FolderLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Delete(id).Do() - if err != nil { - return errwrap.Wrap(fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *FolderLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *FolderLoggingExclusionUpdater) DescribeResource() string { - return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) + return logging.FolderLoggingExclusionIdParseFunc(d, nil) } diff --git a/google/logging_exclusion_organization.go b/google/logging_exclusion_organization.go index 7308efae06e..431725c9c9b 100644 --- a/google/logging_exclusion_organization.go +++ b/google/logging_exclusion_organization.go @@ -3,107 +3,18 @@ package google import ( - "fmt" - - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-provider-google/google/services/logging" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/logging/v2" ) -var OrganizationLoggingExclusionSchema = map[string]*schema.Schema{ - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type OrganizationLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *transport_tpg.Config -} - -func NewOrganizationLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { - organization := d.Get("org_id").(string) - - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } +var OrganizationLoggingExclusionSchema = logging.OrganizationLoggingExclusionSchema - return &OrganizationLoggingExclusionUpdater{ - resourceType: "organizations", - resourceId: organization, - userAgent: userAgent, - Config: config, - }, nil +func NewOrganizationLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (logging.ResourceLoggingExclusionUpdater, error) { + return logging.NewOrganizationLoggingExclusionUpdater(d, config) } func OrganizationLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if 
"organizations" != loggingExclusionId.resourceType { - return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if err := d.Set("org_id", loggingExclusionId.resourceId); err != nil { - return fmt.Errorf("Error setting org_id: %s", err) - } - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Get(id).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *OrganizationLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Delete(id).Do() - if err != nil { - return errwrap.Wrap(fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *OrganizationLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *OrganizationLoggingExclusionUpdater) DescribeResource() string { - return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) + return logging.OrganizationLoggingExclusionIdParseFunc(d, nil) } diff --git a/google/logging_exclusion_project.go b/google/logging_exclusion_project.go index f313befacac..630628cade0 100644 --- a/google/logging_exclusion_project.go +++ b/google/logging_exclusion_project.go @@ -3,114 +3,18 @@ package google import ( - "fmt" - - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-provider-google/google/services/logging" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/logging/v2" ) -var ProjectLoggingExclusionSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type ProjectLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *transport_tpg.Config -} - -func NewProjectLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { - pid, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } +var ProjectLoggingExclusionSchema = logging.ProjectLoggingExclusionSchema - userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) - if err != nil { - return nil, err - } - - return &ProjectLoggingExclusionUpdater{ - resourceType: "projects", - resourceId: pid, - userAgent: userAgent, - Config: config, - }, nil +func NewProjectLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (logging.ResourceLoggingExclusionUpdater, error) { + return logging.NewProjectLoggingExclusionUpdater(d, config) } func ProjectLoggingExclusionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "projects" != loggingExclusionId.resourceType { - return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if config.Project != loggingExclusionId.resourceId { - if err := d.Set("project", loggingExclusionId.resourceId); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Get(id).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *ProjectLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Delete(id).Do() - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error deleting logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *ProjectLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *ProjectLoggingExclusionUpdater) DescribeResource() string { - return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) + return logging.ProjectLoggingExclusionIdParseFunc(d, config) } diff --git a/google/logging_utils.go b/google/logging_utils.go index da82b03feac..1726b3f2871 100644 --- a/google/logging_utils.go +++ b/google/logging_utils.go @@ -3,60 +3,15 @@ package google import ( - "fmt" - "regexp" + "github.com/hashicorp/terraform-provider-google/google/services/logging" ) // loggingSinkResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. -var loggingSinkResourceTypes = []string{ - "billingAccounts", - "folders", - "organizations", - "projects", -} - -// LoggingSinkId represents the parts that make up the canonical id used within terraform for a logging resource. 
-type LoggingSinkId struct { - resourceType string - resourceId string - name string -} - -// loggingSinkIdRegex matches valid logging sink canonical ids -var loggingSinkIdRegex = regexp.MustCompile("(.+)/(.+)/sinks/(.+)") +var loggingSinkResourceTypes = logging.LoggingSinkResourceTypes -// canonicalId returns the LoggingSinkId as the canonical id used within terraform. -func (l LoggingSinkId) canonicalId() string { - return fmt.Sprintf("%s/%s/sinks/%s", l.resourceType, l.resourceId, l.name) -} - -// parent returns the "parent-level" resource that the sink is in (e.g. `folders/foo` for id `folders/foo/sinks/bar`) -func (l LoggingSinkId) parent() string { - return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) -} +type LoggingSinkId = logging.LoggingSinkId // parseLoggingSinkId parses a canonical id into a LoggingSinkId, or returns an error on failure. func parseLoggingSinkId(id string) (*LoggingSinkId, error) { - parts := loggingSinkIdRegex.FindStringSubmatch(id) - if parts == nil { - return nil, fmt.Errorf("unable to parse logging sink id %#v", id) - } - // If our resourceType is not a valid logging sink resource type, complain loudly - validLoggingSinkResourceType := false - for _, v := range loggingSinkResourceTypes { - if v == parts[1] { - validLoggingSinkResourceType = true - break - } - } - - if !validLoggingSinkResourceType { - return nil, fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], - loggingSinkResourceTypes) - } - return &LoggingSinkId{ - resourceType: parts[1], - resourceId: parts[2], - name: parts[3], - }, nil + return logging.ParseLoggingSinkId(id) } diff --git a/google/provider.go b/google/provider.go index 4db9645bad4..fddb50a00aa 100644 --- a/google/provider.go +++ b/google/provider.go @@ -104,6 +104,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/container" "github.com/hashicorp/terraform-provider-google/google/services/containeraws" "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + "github.com/hashicorp/terraform-provider-google/google/services/dataflow" "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" @@ -786,14 +787,14 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_folder": resourcemanager.DataSourceGoogleFolder(), "google_folders": resourcemanager.DataSourceGoogleFolders(), "google_folder_organization_policy": resourcemanager.DataSourceGoogleFolderOrganizationPolicy(), - "google_logging_project_cmek_settings": DataSourceGoogleLoggingProjectCmekSettings(), - "google_logging_sink": DataSourceGoogleLoggingSink(), + "google_logging_project_cmek_settings": logging.DataSourceGoogleLoggingProjectCmekSettings(), + "google_logging_sink": logging.DataSourceGoogleLoggingSink(), "google_monitoring_notification_channel": monitoring.DataSourceMonitoringNotificationChannel(), "google_monitoring_cluster_istio_service": monitoring.DataSourceMonitoringServiceClusterIstio(), "google_monitoring_istio_canonical_service": monitoring.DataSourceMonitoringIstioCanonicalService(), "google_monitoring_mesh_istio_service": monitoring.DataSourceMonitoringServiceMeshIstio(), "google_monitoring_app_engine_service": monitoring.DataSourceMonitoringServiceAppEngine(), - "google_monitoring_uptime_check_ips": DataSourceGoogleMonitoringUptimeCheckIps(), + 
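The `logging_utils.go` diff above uses a Go type alias (`type LoggingSinkId = logging.LoggingSinkId`) rather than a defined type: an alias makes the old and new names denote the identical type, so values cross the package boundary without conversions. A self-contained sketch of both tricks in that file, with a hypothetical `movedSinkId` standing in for the type now living in `services/logging`:

```go
package main

import (
	"fmt"
	"regexp"
)

// The canonical-id regex the moved ParseLoggingSinkId still uses.
var sinkIdRegex = regexp.MustCompile("(.+)/(.+)/sinks/(.+)")

type movedSinkId struct{ ResourceType, ResourceId, Name string }

// An alias ("="), not a defined type: both names are interchangeable.
type LoggingSinkId = movedSinkId

func parseLoggingSinkId(id string) (*LoggingSinkId, error) {
	parts := sinkIdRegex.FindStringSubmatch(id)
	if parts == nil {
		return nil, fmt.Errorf("unable to parse logging sink id %#v", id)
	}
	return &LoggingSinkId{parts[1], parts[2], parts[3]}, nil
}

func main() {
	sink, err := parseLoggingSinkId("folders/foo/sinks/bar")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s/%s -> %s\n", sink.ResourceType, sink.ResourceId, sink.Name)
}
```

Had the shim used `type LoggingSinkId logging.LoggingSinkId` instead, every caller passing the old name into the new package would have needed an explicit conversion.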
"google_monitoring_uptime_check_ips": monitoring.DataSourceGoogleMonitoringUptimeCheckIps(), "google_netblock_ip_ranges": resourcemanager.DataSourceGoogleNetblockIpRanges(), "google_organization": resourcemanager.DataSourceGoogleOrganization(), "google_privateca_certificate_authority": privateca.DataSourcePrivatecaCertificateAuthority(), @@ -804,8 +805,8 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_pubsub_subscription": pubsub.DataSourceGooglePubsubSubscription(), "google_pubsub_topic": pubsub.DataSourceGooglePubsubTopic(), "google_secret_manager_secret": secretmanager.DataSourceSecretManagerSecret(), - "google_secret_manager_secret_version": DataSourceSecretManagerSecretVersion(), - "google_secret_manager_secret_version_access": DataSourceSecretManagerSecretVersionAccess(), + "google_secret_manager_secret_version": secretmanager.DataSourceSecretManagerSecretVersion(), + "google_secret_manager_secret_version_access": secretmanager.DataSourceSecretManagerSecretVersionAccess(), "google_service_account": resourcemanager.DataSourceGoogleServiceAccount(), "google_service_account_access_token": resourcemanager.DataSourceGoogleServiceAccountAccessToken(), "google_service_account_id_token": resourcemanager.DataSourceGoogleServiceAccountIdToken(), @@ -813,23 +814,23 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_service_account_key": resourcemanager.DataSourceGoogleServiceAccountKey(), "google_sourcerepo_repository": sourcerepo.DataSourceGoogleSourceRepoRepository(), "google_spanner_instance": spanner.DataSourceSpannerInstance(), - "google_sql_ca_certs": DataSourceGoogleSQLCaCerts(), - "google_sql_tiers": DataSourceGoogleSQLTiers(), - "google_sql_backup_run": DataSourceSqlBackupRun(), + "google_sql_ca_certs": sql.DataSourceGoogleSQLCaCerts(), + "google_sql_tiers": sql.DataSourceGoogleSQLTiers(), + "google_sql_backup_run": sql.DataSourceSqlBackupRun(), "google_sql_databases": sql.DataSourceSqlDatabases(), "google_sql_database": sql.DataSourceSqlDatabase(), - "google_sql_database_instance": DataSourceSqlDatabaseInstance(), - "google_sql_database_instances": DataSourceSqlDatabaseInstances(), + "google_sql_database_instance": sql.DataSourceSqlDatabaseInstance(), + "google_sql_database_instances": sql.DataSourceSqlDatabaseInstances(), "google_service_networking_peered_dns_domain": servicenetworking.DataSourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": DataSourceGoogleStorageBucket(), - "google_storage_bucket_object": DataSourceGoogleStorageBucketObject(), - "google_storage_bucket_object_content": DataSourceGoogleStorageBucketObjectContent(), - "google_storage_object_signed_url": DataSourceGoogleSignedUrl(), - "google_storage_project_service_account": DataSourceGoogleStorageProjectServiceAccount(), - "google_storage_transfer_project_service_account": DataSourceGoogleStorageTransferProjectServiceAccount(), - "google_tags_tag_key": DataSourceGoogleTagsTagKey(), - "google_tags_tag_value": DataSourceGoogleTagsTagValue(), - "google_tpu_tensorflow_versions": DataSourceTpuTensorflowVersions(), + "google_storage_bucket": storage.DataSourceGoogleStorageBucket(), + "google_storage_bucket_object": storage.DataSourceGoogleStorageBucketObject(), + "google_storage_bucket_object_content": storage.DataSourceGoogleStorageBucketObjectContent(), + "google_storage_object_signed_url": storage.DataSourceGoogleSignedUrl(), + "google_storage_project_service_account": storage.DataSourceGoogleStorageProjectServiceAccount(), + 
"google_storage_transfer_project_service_account": storagetransfer.DataSourceGoogleStorageTransferProjectServiceAccount(), + "google_tags_tag_key": tags.DataSourceGoogleTagsTagKey(), + "google_tags_tag_value": tags.DataSourceGoogleTagsTagValue(), + "google_tpu_tensorflow_versions": tpu.DataSourceTpuTensorflowVersions(), "google_vpc_access_connector": vpcaccess.DataSourceVPCAccessConnector(), "google_redis_instance": redis.DataSourceGoogleRedisInstance(), "google_vertex_ai_index": vertexai.DataSourceVertexAIIndex(), @@ -906,7 +907,7 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_bigtable_table_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater), "google_bigquery_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater), "google_billing_account_iam_policy": tpgiamresource.DataSourceIamPolicy(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater), - "google_dataproc_cluster_iam_policy": tpgiamresource.DataSourceIamPolicy(IamDataprocClusterSchema, NewDataprocClusterUpdater), + "google_dataproc_cluster_iam_policy": tpgiamresource.DataSourceIamPolicy(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater), "google_dataproc_job_iam_policy": tpgiamresource.DataSourceIamPolicy(IamDataprocJobSchema, NewDataprocJobUpdater), "google_folder_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater), "google_healthcare_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater), @@ -1442,32 +1443,32 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_container_cluster": container.ResourceContainerCluster(), "google_container_node_pool": container.ResourceContainerNodePool(), "google_container_registry": containeranalysis.ResourceContainerRegistry(), - "google_dataflow_job": ResourceDataflowJob(), - "google_dataproc_cluster": ResourceDataprocCluster(), - "google_dataproc_job": ResourceDataprocJob(), - "google_dialogflow_cx_version": ResourceDialogflowCXVersion(), - "google_dialogflow_cx_environment": ResourceDialogflowCXEnvironment(), + "google_dataflow_job": dataflow.ResourceDataflowJob(), + "google_dataproc_cluster": dataproc.ResourceDataprocCluster(), + "google_dataproc_job": dataproc.ResourceDataprocJob(), + "google_dialogflow_cx_version": dialogflowcx.ResourceDialogflowCXVersion(), + "google_dialogflow_cx_environment": dialogflowcx.ResourceDialogflowCXEnvironment(), "google_dns_record_set": dns.ResourceDnsRecordSet(), - "google_endpoints_service": ResourceEndpointsService(), + "google_endpoints_service": servicemanagement.ResourceEndpointsService(), "google_folder": resourcemanager.ResourceGoogleFolder(), "google_folder_organization_policy": resourcemanager.ResourceGoogleFolderOrganizationPolicy(), - "google_logging_billing_account_sink": ResourceLoggingBillingAccountSink(), - "google_logging_billing_account_exclusion": ResourceLoggingExclusion(BillingAccountLoggingExclusionSchema, NewBillingAccountLoggingExclusionUpdater, BillingAccountLoggingExclusionIdParseFunc), - "google_logging_billing_account_bucket_config": ResourceLoggingBillingAccountBucketConfig(), - "google_logging_organization_sink": ResourceLoggingOrganizationSink(), - "google_logging_organization_exclusion": ResourceLoggingExclusion(OrganizationLoggingExclusionSchema, 
NewOrganizationLoggingExclusionUpdater, OrganizationLoggingExclusionIdParseFunc), - "google_logging_organization_bucket_config": ResourceLoggingOrganizationBucketConfig(), - "google_logging_folder_sink": ResourceLoggingFolderSink(), - "google_logging_folder_exclusion": ResourceLoggingExclusion(FolderLoggingExclusionSchema, NewFolderLoggingExclusionUpdater, FolderLoggingExclusionIdParseFunc), - "google_logging_folder_bucket_config": ResourceLoggingFolderBucketConfig(), - "google_logging_project_sink": ResourceLoggingProjectSink(), - "google_logging_project_exclusion": ResourceLoggingExclusion(ProjectLoggingExclusionSchema, NewProjectLoggingExclusionUpdater, ProjectLoggingExclusionIdParseFunc), - "google_logging_project_bucket_config": ResourceLoggingProjectBucketConfig(), - "google_monitoring_dashboard": ResourceMonitoringDashboard(), + "google_logging_billing_account_sink": logging.ResourceLoggingBillingAccountSink(), + "google_logging_billing_account_exclusion": logging.ResourceLoggingExclusion(logging.BillingAccountLoggingExclusionSchema, logging.NewBillingAccountLoggingExclusionUpdater, logging.BillingAccountLoggingExclusionIdParseFunc), + "google_logging_billing_account_bucket_config": logging.ResourceLoggingBillingAccountBucketConfig(), + "google_logging_organization_sink": logging.ResourceLoggingOrganizationSink(), + "google_logging_organization_exclusion": logging.ResourceLoggingExclusion(logging.OrganizationLoggingExclusionSchema, logging.NewOrganizationLoggingExclusionUpdater, logging.OrganizationLoggingExclusionIdParseFunc), + "google_logging_organization_bucket_config": logging.ResourceLoggingOrganizationBucketConfig(), + "google_logging_folder_sink": logging.ResourceLoggingFolderSink(), + "google_logging_folder_exclusion": logging.ResourceLoggingExclusion(logging.FolderLoggingExclusionSchema, logging.NewFolderLoggingExclusionUpdater, logging.FolderLoggingExclusionIdParseFunc), + "google_logging_folder_bucket_config": logging.ResourceLoggingFolderBucketConfig(), + "google_logging_project_sink": logging.ResourceLoggingProjectSink(), + "google_logging_project_exclusion": logging.ResourceLoggingExclusion(logging.ProjectLoggingExclusionSchema, logging.NewProjectLoggingExclusionUpdater, logging.ProjectLoggingExclusionIdParseFunc), + "google_logging_project_bucket_config": logging.ResourceLoggingProjectBucketConfig(), + "google_monitoring_dashboard": monitoring.ResourceMonitoringDashboard(), "google_service_networking_connection": servicenetworking.ResourceServiceNetworkingConnection(), - "google_sql_database_instance": ResourceSqlDatabaseInstance(), - "google_sql_ssl_cert": ResourceSqlSslCert(), - "google_sql_user": ResourceSqlUser(), + "google_sql_database_instance": sql.ResourceSqlDatabaseInstance(), + "google_sql_ssl_cert": sql.ResourceSqlSslCert(), + "google_sql_user": sql.ResourceSqlUser(), "google_organization_iam_custom_role": resourcemanager.ResourceGoogleOrganizationIamCustomRole(), "google_organization_policy": resourcemanager.ResourceGoogleOrganizationPolicy(), "google_project": resourcemanager.ResourceGoogleProject(), @@ -1475,18 +1476,18 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_project_service": resourcemanager.ResourceGoogleProjectService(), "google_project_iam_custom_role": resourcemanager.ResourceGoogleProjectIamCustomRole(), "google_project_organization_policy": resourcemanager.ResourceGoogleProjectOrganizationPolicy(), - "google_project_usage_export_bucket": ResourceProjectUsageBucket(), + "google_project_usage_export_bucket": 
compute.ResourceProjectUsageBucket(), "google_service_account": resourcemanager.ResourceGoogleServiceAccount(), "google_service_account_key": resourcemanager.ResourceGoogleServiceAccountKey(), "google_service_networking_peered_dns_domain": servicenetworking.ResourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": ResourceStorageBucket(), - "google_storage_bucket_acl": ResourceStorageBucketAcl(), - "google_storage_bucket_object": ResourceStorageBucketObject(), - "google_storage_object_acl": ResourceStorageObjectAcl(), - "google_storage_default_object_acl": ResourceStorageDefaultObjectAcl(), - "google_storage_notification": ResourceStorageNotification(), - "google_storage_transfer_job": ResourceStorageTransferJob(), - "google_tags_location_tag_binding": ResourceTagsLocationTagBinding(), + "google_storage_bucket": storage.ResourceStorageBucket(), + "google_storage_bucket_acl": storage.ResourceStorageBucketAcl(), + "google_storage_bucket_object": storage.ResourceStorageBucketObject(), + "google_storage_object_acl": storage.ResourceStorageObjectAcl(), + "google_storage_default_object_acl": storage.ResourceStorageDefaultObjectAcl(), + "google_storage_notification": storage.ResourceStorageNotification(), + "google_storage_transfer_job": storagetransfer.ResourceStorageTransferJob(), + "google_tags_location_tag_binding": tags.ResourceTagsLocationTagBinding(), // ####### END handwritten resources ########### }, map[string]*schema.Resource{ @@ -1503,9 +1504,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_billing_account_iam_binding": tpgiamresource.ResourceIamBinding(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), "google_billing_account_iam_member": tpgiamresource.ResourceIamMember(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), "google_billing_account_iam_policy": tpgiamresource.ResourceIamPolicy(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), - "google_dataproc_cluster_iam_binding": tpgiamresource.ResourceIamBinding(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_cluster_iam_member": tpgiamresource.ResourceIamMember(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_cluster_iam_policy": tpgiamresource.ResourceIamPolicy(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_member": tpgiamresource.ResourceIamMember(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), "google_dataproc_job_iam_binding": tpgiamresource.ResourceIamBinding(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), "google_dataproc_job_iam_member": tpgiamresource.ResourceIamMember(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), "google_dataproc_job_iam_policy": tpgiamresource.ResourceIamPolicy(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), diff --git 
a/google/resource_dataflow_job.go b/google/resource_dataflow_job.go index 7c4b5afc722..2d39e15aac5 100644 --- a/google/resource_dataflow_job.go +++ b/google/resource_dataflow_job.go @@ -3,666 +3,9 @@ package google import ( - "context" - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - dataflow "google.golang.org/api/dataflow/v1b3" - "google.golang.org/api/googleapi" + "github.com/hashicorp/terraform-provider-google/google/services/dataflow" ) -const resourceDataflowJobGoogleProvidedLabelPrefix = "labels.goog-dataflow-provided" - -var dataflowTerminatingStatesMap = map[string]struct{}{ - "JOB_STATE_CANCELLING": {}, - "JOB_STATE_DRAINING": {}, -} - -var dataflowTerminalStatesMap = map[string]struct{}{ - "JOB_STATE_DONE": {}, - "JOB_STATE_FAILED": {}, - "JOB_STATE_CANCELLED": {}, - "JOB_STATE_UPDATED": {}, - "JOB_STATE_DRAINED": {}, -} - -func resourceDataflowJobLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Example Diff: "labels.goog-dataflow-provided-template-version": "word_count" => "" - if strings.HasPrefix(k, resourceDataflowJobGoogleProvidedLabelPrefix) && new == "" { - // Suppress diff if field is a Google Dataflow-provided label key and has no explicitly set value in Config. - return true - } - - // Let diff be determined by labels (above) - if strings.HasPrefix(k, "labels.%") { - return true - } - - // For other keys, don't suppress diff. - return false -} - -func ResourceDataflowJob() *schema.Resource { - return &schema.Resource{ - Create: resourceDataflowJobCreate, - Read: resourceDataflowJobRead, - Update: resourceDataflowJobUpdateByReplacement, - Delete: resourceDataflowJobDelete, - Timeouts: &schema.ResourceTimeout{ - Update: schema.DefaultTimeout(10 * time.Minute), - }, - CustomizeDiff: customdiff.All( - resourceDataflowJobTypeCustomizeDiff, - ), - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - // ForceNew applies to both stream and batch jobs - ForceNew: true, - Description: `A unique name for the resource, required by Dataflow.`, - }, - - "template_gcs_path": { - Type: schema.TypeString, - Required: true, - Description: `The Google Cloud Storage path to the Dataflow job template.`, - }, - - "temp_gcs_location": { - Type: schema.TypeString, - Required: true, - Description: `A writeable location on Google Cloud Storage for the Dataflow job to dump its temporary data.`, - }, - - "zone": { - Type: schema.TypeString, - Optional: true, - // ForceNew applies to both stream and batch jobs - ForceNew: true, - Description: `The zone in which the created job should run. If it is not provided, the provider zone is used.`, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - // ForceNew applies to both stream and batch jobs - ForceNew: true, - Description: `The region in which the created job should run.`, - }, - - "max_workers": { - Type: schema.TypeInt, - Optional: true, - // ForceNew applies to both stream and batch jobs - ForceNew: true, - Description: `The number of workers permitted to work on the job. 
More workers may improve processing speed at additional cost.`, - }, - - "parameters": { - Type: schema.TypeMap, - Optional: true, - Description: `Key/Value pairs to be passed to the Dataflow job (as used in the template).`, - }, - - "labels": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - DiffSuppressFunc: resourceDataflowJobLabelDiffSuppress, - Description: `User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: Google-provided Dataflow templates often provide default labels that begin with goog-dataflow-provided. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.`, - }, - - "transform_name_mapping": { - Type: schema.TypeMap, - Optional: true, - Description: `Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.`, - }, - - "on_delete": { - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false), - Optional: true, - Default: "drain", - Description: `One of "drain" or "cancel". Specifies behavior of deletion during terraform destroy.`, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - // ForceNew applies to both stream and batch jobs - ForceNew: true, - Description: `The project in which the resource belongs.`, - }, - - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of the resource, selected from the JobState enum.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `The type of this job, selected from the JobType enum.`, - }, - "service_account_email": { - Type: schema.TypeString, - Optional: true, - Description: `The Service Account email used to create the job.`, - }, - - "network": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `The network to which VMs will be assigned. If it is not provided, "default" will be used.`, - }, - - "subnetwork": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, - }, - - "machine_type": { - Type: schema.TypeString, - Optional: true, - Description: `The machine type to use for the job.`, - }, - - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - Description: `The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`, - }, - - "ip_configuration": { - Type: schema.TypeString, - Optional: true, - Description: `The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".`, - ValidateFunc: validation.StringInSlice([]string{"WORKER_IP_PUBLIC", "WORKER_IP_PRIVATE", ""}, false), - }, - - "additional_experiments": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Description: `List of experiments that should be used by the job. 
An example value is ["enable_stackdriver_agent_metrics"].`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "job_id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique ID of this job.`, - }, - - "enable_streaming_engine": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if the job should use the streaming engine feature.`, - }, - - "skip_wait_on_job_termination": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from terraform state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. by embedding a release ID or by using a random_id.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataflowJobTypeCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { - // All non-virtual fields are ForceNew for batch jobs - if d.Get("type") == "JOB_TYPE_BATCH" { - resourceSchema := ResourceDataflowJob().Schema - for field := range resourceSchema { - if field == "on_delete" { - continue - } - // Labels map will likely have suppressed changes, so we check each key instead of the parent field - if field == "labels" { - if err := resourceDataflowJobIterateMapForceNew(field, d); err != nil { - return err - } - } else if d.HasChange(field) { - if err := d.ForceNew(field); err != nil { - return err - } - } - } - } - - return nil -} - -// return true if a job is in a terminal state, OR if a job is in a -// terminating state and skipWait is true -func shouldStopDataflowJobDeleteQuery(state string, skipWait bool) bool { - _, stopQuery := dataflowTerminalStatesMap[state] - if !stopQuery && skipWait { - _, stopQuery = dataflowTerminatingStatesMap[state] - } - return stopQuery -} - -func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return err - } - - params := tpgresource.ExpandStringMap(d, "parameters") - - env, err := resourceDataflowJobSetupEnv(d, config) - if err != nil { - return err - } - - request := dataflow.CreateJobFromTemplateRequest{ - JobName: d.Get("name").(string), - GcsPath: d.Get("template_gcs_path").(string), - Parameters: params, - Environment: &env, - } - - job, err := resourceDataflowJobCreateJob(config, project, region, userAgent, &request) - if err != nil { - return err - } - d.SetId(job.Id) - - return resourceDataflowJobRead(d, meta) -} - -func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return err - } - - id := d.Id() - - job, err := resourceDataflowJobGetJob(config, project, region, userAgent, id) - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) - } - - if err := d.Set("job_id", job.Id); err != nil { - return fmt.Errorf("Error setting job_id: %s", err) - } 
- if err := d.Set("state", job.CurrentState); err != nil { - return fmt.Errorf("Error setting state: %s", err) - } - if err := d.Set("name", job.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("type", job.Type); err != nil { - return fmt.Errorf("Error setting type: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("labels", job.Labels); err != nil { - return fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("kms_key_name", job.Environment.ServiceKmsKeyName); err != nil { - return fmt.Errorf("Error setting kms_key_name: %s", err) - } - - sdkPipelineOptions, err := tpgresource.ConvertToMap(job.Environment.SdkPipelineOptions) - if err != nil { - return err - } - optionsMap := sdkPipelineOptions["options"].(map[string]interface{}) - if err := d.Set("template_gcs_path", optionsMap["templateLocation"]); err != nil { - return fmt.Errorf("Error setting template_gcs_path: %s", err) - } - if err := d.Set("temp_gcs_location", optionsMap["tempLocation"]); err != nil { - return fmt.Errorf("Error setting temp_gcs_location: %s", err) - } - if err := d.Set("machine_type", optionsMap["machineType"]); err != nil { - return fmt.Errorf("Error setting machine_type: %s", err) - } - if err := d.Set("network", optionsMap["network"]); err != nil { - return fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("service_account_email", optionsMap["serviceAccountEmail"]); err != nil { - return fmt.Errorf("Error setting service_account_email: %s", err) - } - - if ok := shouldStopDataflowJobDeleteQuery(job.CurrentState, d.Get("skip_wait_on_job_termination").(bool)); ok { - log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState) - d.SetId("") - return nil - } - d.SetId(job.Id) - - return nil -} - -// Stream update method. 
Batch job changes should have been set to ForceNew via custom diff -func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interface{}) error { - // Don't send an update request if only virtual fields have changes - if resourceDataflowJobIsVirtualUpdate(d, ResourceDataflowJob().Schema) { - return nil - } - - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return err - } - - params := tpgresource.ExpandStringMap(d, "parameters") - tnamemapping := tpgresource.ExpandStringMap(d, "transform_name_mapping") - - env, err := resourceDataflowJobSetupEnv(d, config) - if err != nil { - return err - } - - request := dataflow.LaunchTemplateParameters{ - JobName: d.Get("name").(string), - Parameters: params, - TransformNameMapping: tnamemapping, - Environment: &env, - Update: true, - } - - var response *dataflow.LaunchTemplateResponse - err = transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: func() (updateErr error) { - response, updateErr = resourceDataflowJobLaunchTemplate(config, project, region, userAgent, d.Get("template_gcs_path").(string), &request) - return updateErr - }, - Timeout: time.Minute * time.Duration(5), - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsDataflowJobUpdateRetryableError}, - }) - if err != nil { - return err - } - - if err := waitForDataflowJobToBeUpdated(d, config, response.Job.Id, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { - return fmt.Errorf("Error updating job with job ID %q: %v", d.Id(), err) - } - - d.SetId(response.Job.Id) - - return resourceDataflowJobRead(d, meta) -} - -func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return err - } - - id := d.Id() - - requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string)) - if err != nil { - return err - } - - // Retry updating the state while the job is not ready to be canceled/drained. - err = resource.Retry(time.Minute*time.Duration(15), func() *resource.RetryError { - // To terminate a dataflow job, we update the job with a requested - // terminal state. - job := &dataflow.Job{ - RequestedState: requestedState, - } - - _, updateErr := resourceDataflowJobUpdateJob(config, project, region, userAgent, id, job) - if updateErr != nil { - gerr, isGoogleErr := updateErr.(*googleapi.Error) - if !isGoogleErr { - // If we have an error and it's not a google-specific error, we should go ahead and return. - return resource.NonRetryableError(updateErr) - } - - if strings.Contains(gerr.Message, "not yet ready for canceling") { - // Retry cancelling job if it's not ready. - // Sleep to avoid hitting update quota with repeated attempts. - time.Sleep(5 * time.Second) - return resource.RetryableError(updateErr) - } - - if strings.Contains(gerr.Message, "Job has terminated") { - // Job has already been terminated, skip. 
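The delete logic in this hunk polls until the job leaves a non-final state, keyed off two state sets that this PR exports from the new service package (see the +var lines later in this file). The drain/cancel predicate then reduces to a sketch like:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-google/google/services/dataflow"
)

// shouldStop mirrors shouldStopDataflowJobDeleteQuery: a job is finished if it
// reached a terminal state, or a merely terminating state when skipWait is set.
func shouldStop(state string, skipWait bool) bool {
	if _, ok := dataflow.DataflowTerminalStatesMap[state]; ok {
		return true
	}
	if skipWait {
		_, ok := dataflow.DataflowTerminatingStatesMap[state]
		return ok
	}
	return false
}

func main() {
	fmt.Println(shouldStop("JOB_STATE_DONE", false))     // true
	fmt.Println(shouldStop("JOB_STATE_DRAINING", false)) // false: keep waiting
	fmt.Println(shouldStop("JOB_STATE_DRAINING", true))  // true: skip_wait_on_job_termination
}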
- return nil - } - } - - return nil - }) - if err != nil { - return err - } - - // Wait for state to reach terminal state (canceled/drained/done plus cancelling/draining if skipWait) - skipWait := d.Get("skip_wait_on_job_termination").(bool) - ok := shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) - for !ok { - log.Printf("[DEBUG] Waiting for job with job state %q to terminate...", d.Get("state").(string)) - time.Sleep(5 * time.Second) - - err = resourceDataflowJobRead(d, meta) - if err != nil { - return fmt.Errorf("Error while reading job to see if it was properly terminated: %v", err) - } - ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) - } - - // Only remove the job from state if it's actually successfully hit a final state. - if ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait); ok { - log.Printf("[DEBUG] Removing dataflow job with final state %q", d.Get("state").(string)) - d.SetId("") - return nil - } - return fmt.Errorf("Unable to cancel the dataflow job '%s' - final state was %q.", d.Id(), d.Get("state").(string)) -} - -func resourceDataflowJobMapRequestedState(policy string) (string, error) { - switch policy { - case "cancel": - return "JOB_STATE_CANCELLED", nil - case "drain": - return "JOB_STATE_DRAINING", nil - default: - return "", fmt.Errorf("Invalid `on_delete` policy: %s", policy) - } -} - -func resourceDataflowJobCreateJob(config *transport_tpg.Config, project, region, userAgent string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Templates.Create(project, request).Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Create(project, region, request).Do() -} - -func resourceDataflowJobGetJob(config *transport_tpg.Config, project, region, userAgent string, id string) (*dataflow.Job, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Jobs.Get(project, id).View("JOB_VIEW_ALL").Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Get(project, region, id).View("JOB_VIEW_ALL").Do() -} - -func resourceDataflowJobUpdateJob(config *transport_tpg.Config, project, region, userAgent string, id string, job *dataflow.Job) (*dataflow.Job, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Jobs.Update(project, id, job).Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Update(project, region, id, job).Do() -} - -func resourceDataflowJobLaunchTemplate(config *transport_tpg.Config, project, region, userAgent string, gcsPath string, request *dataflow.LaunchTemplateParameters) (*dataflow.LaunchTemplateResponse, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Templates.Launch(project, request).GcsPath(gcsPath).Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Launch(project, region, request).GcsPath(gcsPath).Do() -} - -func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *transport_tpg.Config) (dataflow.RuntimeEnvironment, error) { - zone, _ := tpgresource.GetZone(d, config) - - labels := tpgresource.ExpandStringMap(d, "labels") - - additionalExperiments := tpgresource.ConvertStringSet(d.Get("additional_experiments").(*schema.Set)) - - env := dataflow.RuntimeEnvironment{ - MaxWorkers: int64(d.Get("max_workers").(int)), - Network: d.Get("network").(string), - ServiceAccountEmail: 
d.Get("service_account_email").(string), - Subnetwork: d.Get("subnetwork").(string), - TempLocation: d.Get("temp_gcs_location").(string), - MachineType: d.Get("machine_type").(string), - KmsKeyName: d.Get("kms_key_name").(string), - IpConfiguration: d.Get("ip_configuration").(string), - EnableStreamingEngine: d.Get("enable_streaming_engine").(bool), - AdditionalUserLabels: labels, - Zone: zone, - AdditionalExperiments: additionalExperiments, - } - return env, nil -} - -func resourceDataflowJobIterateMapForceNew(mapKey string, d *schema.ResourceDiff) error { - obj := d.Get(mapKey).(map[string]interface{}) - for k := range obj { - entrySchemaKey := mapKey + "." + k - if d.HasChange(entrySchemaKey) { - // ForceNew must be called on the parent map to trigger - if err := d.ForceNew(mapKey); err != nil { - return err - } - break - } - } - return nil -} - -func resourceDataflowJobIterateMapHasChange(mapKey string, d *schema.ResourceData) bool { - obj := d.Get(mapKey).(map[string]interface{}) - for k := range obj { - entrySchemaKey := mapKey + "." + k - if d.HasChange(entrySchemaKey) { - return true - } - } - return false -} - -func resourceDataflowJobIsVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { - // on_delete is the only virtual field - if d.HasChange("on_delete") { - for field := range resourceSchema { - if field == "on_delete" { - continue - } - // Labels map will likely have suppressed changes, so we check each key instead of the parent field - if (field == "labels" && resourceDataflowJobIterateMapHasChange(field, d)) || - (field != "labels" && d.HasChange(field)) { - return false - } - } - // on_delete is changing, but nothing else - return true - } - - return false -} - -func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *transport_tpg.Config, replacementJobID, userAgent string, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return resource.NonRetryableError(err) - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return resource.NonRetryableError(err) - } - - replacementJob, err := resourceDataflowJobGetJob(config, project, region, userAgent, replacementJobID) - if err != nil { - if transport_tpg.IsRetryableError(err, nil, nil) { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } +var dataflowTerminatingStatesMap = dataflow.DataflowTerminatingStatesMap - state := replacementJob.CurrentState - switch state { - case "", "JOB_STATE_PENDING": - return resource.RetryableError(fmt.Errorf("the replacement job with ID %q has pending state %q.", replacementJobID, state)) - case "JOB_STATE_FAILED": - return resource.NonRetryableError(fmt.Errorf("the replacement job with ID %q failed with state %q.", replacementJobID, state)) - default: - log.Printf("[DEBUG] the replacement job with ID %q has state %q.", replacementJobID, state) - return nil - } - }) -} +var dataflowTerminalStatesMap = dataflow.DataflowTerminalStatesMap diff --git a/google/resource_dataflow_job_test.go b/google/resource_dataflow_job_test.go index 84c2ce4e661..98992b97fa4 100644 --- a/google/resource_dataflow_job_test.go +++ b/google/resource_dataflow_job_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/services/dataflow" 
"github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/tpgresource" @@ -442,9 +443,9 @@ func testAccCheckDataflowJobDestroyProducer(t *testing.T) func(s *terraform.Stat if err != nil { return fmt.Errorf("could not parse attribute: %v", err) } - _, ok = dataflowTerminalStatesMap[job.CurrentState] + _, ok = dataflow.DataflowTerminalStatesMap[job.CurrentState] if !ok && skipWait { - _, ok = dataflowTerminatingStatesMap[job.CurrentState] + _, ok = dataflow.DataflowTerminatingStatesMap[job.CurrentState] } if !ok { return fmt.Errorf("Job still present") @@ -472,9 +473,9 @@ func testAccCheckDataflowJobRegionDestroyProducer(t *testing.T) func(s *terrafor if err != nil { return fmt.Errorf("could not parse attribute: %v", err) } - _, ok = dataflowTerminalStatesMap[job.CurrentState] + _, ok = dataflow.DataflowTerminalStatesMap[job.CurrentState] if !ok && skipWait { - _, ok = dataflowTerminatingStatesMap[job.CurrentState] + _, ok = dataflow.DataflowTerminatingStatesMap[job.CurrentState] } if !ok { return fmt.Errorf("Job still present") diff --git a/google/resource_dataproc_cluster_test.go b/google/resource_dataproc_cluster_test.go index 11bd40149f4..7e5773c27b4 100644 --- a/google/resource_dataproc_cluster_test.go +++ b/google/resource_dataproc_cluster_test.go @@ -24,122 +24,6 @@ import ( "google.golang.org/api/dataproc/v1" ) -func TestDataprocExtractInitTimeout(t *testing.T) { - t.Parallel() - - actual, err := extractInitTimeout("500s") - expected := 500 - if err != nil { - t.Fatalf("Expected %d, but got error %v", expected, err) - } - if actual != expected { - t.Fatalf("Expected %d, but got %d", expected, actual) - } -} - -func TestDataprocExtractInitTimeout_nonSeconds(t *testing.T) { - t.Parallel() - - actual, err := extractInitTimeout("5m") - expected := 300 - if err != nil { - t.Fatalf("Expected %d, but got error %v", expected, err) - } - if actual != expected { - t.Fatalf("Expected %d, but got %d", expected, actual) - } -} - -func TestDataprocExtractInitTimeout_empty(t *testing.T) { - t.Parallel() - - _, err := extractInitTimeout("") - expected := "time: invalid duration" - if err != nil && err.Error() != expected { - return - } - t.Fatalf("Expected an error with message '%s', but got %v", expected, err.Error()) -} - -func TestDataprocParseImageVersion(t *testing.T) { - t.Parallel() - - testCases := map[string]dataprocImageVersion{ - "1.2": {"1", "2", "", ""}, - "1.2.3": {"1", "2", "3", ""}, - "1.2.3rc": {"1", "2", "3rc", ""}, - "1.2-debian9": {"1", "2", "", "debian9"}, - "1.2.3-debian9": {"1", "2", "3", "debian9"}, - "1.2.3rc-debian9": {"1", "2", "3rc", "debian9"}, - } - - for v, expected := range testCases { - actual, err := parseDataprocImageVersion(v) - if actual.major != expected.major { - t.Errorf("parsing version %q returned error: %v", v, err) - } - if err != nil { - t.Errorf("parsing version %q returned error: %v", v, err) - } - if actual.minor != expected.minor { - t.Errorf("parsing version %q returned error: %v", v, err) - } - if actual.subminor != expected.subminor { - t.Errorf("parsing version %q returned error: %v", v, err) - } - if actual.osName != expected.osName { - t.Errorf("parsing version %q returned error: %v", v, err) - } - } - - errorTestCases := []string{ - "", - "1", - "notaversion", - "1-debian", - } - for _, v := range errorTestCases { - if _, err := parseDataprocImageVersion(v); err == nil { - t.Errorf("expected parsing invalid version %q to return error", v) - } - } -} - -func TestDataprocDiffSuppress(t 
*testing.T) { - t.Parallel() - - doSuppress := [][]string{ - {"1.3.10-debian9", "1.3"}, - {"1.3.10-debian9", "1.3-debian9"}, - {"1.3.10", "1.3"}, - {"1.3-debian9", "1.3"}, - } - - noSuppress := [][]string{ - {"1.3.10-debian9", "1.3.10-ubuntu"}, - {"1.3.10-debian9", "1.3.9-debian9"}, - {"1.3.10-debian9", "1.3-ubuntu"}, - {"1.3.10-debian9", "1.3.9"}, - {"1.3.10-debian9", "1.4"}, - {"1.3.10-debian9", "2.3"}, - {"1.3.10", "1.3.10-debian9"}, - {"1.3", "1.3.10"}, - {"1.3", "1.3.10-debian9"}, - {"1.3", "1.3-debian9"}, - } - - for _, tup := range doSuppress { - if !dataprocImageVersionDiffSuppress("", tup[0], tup[1], nil) { - t.Errorf("expected (old: %q, new: %q) to be suppressed", tup[0], tup[1]) - } - } - for _, tup := range noSuppress { - if dataprocImageVersionDiffSuppress("", tup[0], tup[1], nil) { - t.Errorf("expected (old: %q, new: %q) to not be suppressed", tup[0], tup[1]) - } - } -} - func TestAccDataprocCluster_missingZoneGlobalRegion1(t *testing.T) { t.Parallel() diff --git a/google/resource_endpoints_service_test.go b/google/resource_endpoints_service_test.go index dd37b9a528e..1a952e08067 100644 --- a/google/resource_endpoints_service_test.go +++ b/google/resource_endpoints_service_test.go @@ -3,7 +3,6 @@ package google import ( - "reflect" "strings" "testing" @@ -60,58 +59,6 @@ func TestAccEndpointsService_grpc(t *testing.T) { }) } -func TestEndpointsService_grpcMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - ExpectedAttributes map[string]string - Meta interface{} - }{ - "update from protoc_output to protoc_output_base64": { - StateVersion: 0, - Attributes: map[string]string{ - "protoc_output": "123456789", - "name": "testcase", - }, - ExpectedAttributes: map[string]string{ - "protoc_output_base64": "MTIzNDU2Nzg5", - "protoc_output": "", - "name": "testcase", - }, - Meta: &transport_tpg.Config{Project: "gcp-project", Region: "us-central1"}, - }, - "update from non-protoc_output": { - StateVersion: 0, - Attributes: map[string]string{ - "openapi_config": "foo bar baz", - "name": "testcase-2", - }, - ExpectedAttributes: map[string]string{ - "openapi_config": "foo bar baz", - "name": "testcase-2", - }, - Meta: &transport_tpg.Config{Project: "gcp-project", Region: "us-central1"}, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.Attributes["name"], - Attributes: tc.Attributes, - } - - is, err := migrateEndpointsService(tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if !reflect.DeepEqual(is.Attributes, tc.ExpectedAttributes) { - t.Fatalf("Attributes should be `%s` but are `%s`", tc.ExpectedAttributes, is.Attributes) - } - } -} - func testAccEndpointsService_basic(serviceId, project, rev string) string { return fmt.Sprintf(` resource "google_endpoints_service" "endpoints_service" { diff --git a/google/resource_logging_exclusion.go b/google/resource_logging_exclusion.go index 2cc303869cc..963a50ccabd 100644 --- a/google/resource_logging_exclusion.go +++ b/google/resource_logging_exclusion.go @@ -2,306 +2,9 @@ // SPDX-License-Identifier: MPL-2.0 package google -import ( - "fmt" - "regexp" - "strings" - - "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/logging/v2" -) - -var 
LoggingExclusionBaseSchema = map[string]*schema.Schema{ - "filter": { - Type: schema.TypeString, - Required: true, - Description: `The filter to apply when excluding logs. Only log entries that match the filter are excluded.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the logging exclusion.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether this exclusion rule should be disabled or not. This defaults to false.`, - }, -} - -func ResourceLoggingExclusion(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceLoggingExclusionUpdaterFunc, resourceIdParser tpgiamresource.ResourceIdParserFunc) *schema.Resource { - return &schema.Resource{ - Create: resourceLoggingExclusionCreate(newUpdaterFunc), - Read: resourceLoggingExclusionRead(newUpdaterFunc), - Update: resourceLoggingExclusionUpdate(newUpdaterFunc), - Delete: resourceLoggingExclusionDelete(newUpdaterFunc), - - Importer: &schema.ResourceImporter{ - State: resourceLoggingExclusionImportState(resourceIdParser), - }, - - Schema: tpgresource.MergeSchemas(LoggingExclusionBaseSchema, parentSpecificSchema), - UseJSONNumber: true, - } -} - -func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.CreateFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - id, exclusion := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) - - // Logging exclusions don't seem to be able to be mutated in parallel, see - // https://github.com/hashicorp/terraform-provider-google/issues/4796 - transport_tpg.MutexStore.Lock(id.parent()) - defer transport_tpg.MutexStore.Unlock(id.parent()) - - err = updater.CreateLoggingExclusion(id.parent(), exclusion) - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - - return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) - } -} - -func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.ReadFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - exclusion, err := updater.ReadLoggingExclusion(d.Id()) - - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Logging Exclusion %s", d.Get("name").(string))) - } - - if err := flattenResourceLoggingExclusion(d, exclusion); err != nil { - return err - } - - if updater.GetResourceType() == "projects" { - if err := d.Set("project", updater.GetResourceId()); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - } - - return nil - } -} - -func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.UpdateFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) - exclusion, updateMask := expandResourceLoggingExclusionForUpdate(d) - - // Logging exclusions don't seem to be able to be mutated in parallel, see - // 
https://github.com/hashicorp/terraform-provider-google/issues/4796 - transport_tpg.MutexStore.Lock(id.parent()) - defer transport_tpg.MutexStore.Unlock(id.parent()) - - err = updater.UpdateLoggingExclusion(d.Id(), exclusion, updateMask) - if err != nil { - return err - } - - return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) - } -} - -func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.DeleteFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) - // Logging exclusions don't seem to be able to be mutated in parallel, see - // https://github.com/hashicorp/terraform-provider-google/issues/4796 - transport_tpg.MutexStore.Lock(id.parent()) - defer transport_tpg.MutexStore.Unlock(id.parent()) - - err = updater.DeleteLoggingExclusion(d.Id()) - if err != nil { - return err - } - - d.SetId("") - return nil - } -} - -func resourceLoggingExclusionImportState(resourceIdParser tpgiamresource.ResourceIdParserFunc) schema.StateFunc { - return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - err := resourceIdParser(d, config) - if err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil - } -} - -func expandResourceLoggingExclusion(d *schema.ResourceData, resourceType, resourceId string) (LoggingExclusionId, *logging.LogExclusion) { - id := LoggingExclusionId{ - resourceType: resourceType, - resourceId: resourceId, - name: d.Get("name").(string), - } - - exclusion := logging.LogExclusion{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Filter: d.Get("filter").(string), - Disabled: d.Get("disabled").(bool), - } - return id, &exclusion -} - -func flattenResourceLoggingExclusion(d *schema.ResourceData, exclusion *logging.LogExclusion) error { - if err := d.Set("name", exclusion.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", exclusion.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("filter", exclusion.Filter); err != nil { - return fmt.Errorf("Error setting filter: %s", err) - } - if err := d.Set("disabled", exclusion.Disabled); err != nil { - return fmt.Errorf("Error setting disabled: %s", err) - } - - return nil -} - -func expandResourceLoggingExclusionForUpdate(d *schema.ResourceData) (*logging.LogExclusion, string) { - // Can update description/filter/disabled right now. 
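The update expander continuing below pairs every changed field with both an update-mask entry and a ForceSendFields entry; the generated API client drops zero values (disabled=false, description="") from the request body unless they are force-sent. A standalone sketch of the same pattern, where buildUpdate is a hypothetical helper rather than the provider's API:

package main

import (
	"fmt"
	"strings"

	"google.golang.org/api/logging/v2"
)

// buildUpdate collects only the changed fields into the exclusion and its
// field mask, force-sending each one so zero values survive serialization.
func buildUpdate(changed map[string]bool, desc, filter string, disabled bool) (*logging.LogExclusion, string) {
	ex := &logging.LogExclusion{}
	var mask []string
	if changed["description"] {
		ex.Description = desc
		ex.ForceSendFields = append(ex.ForceSendFields, "Description")
		mask = append(mask, "description")
	}
	if changed["filter"] {
		ex.Filter = filter
		ex.ForceSendFields = append(ex.ForceSendFields, "Filter")
		mask = append(mask, "filter")
	}
	if changed["disabled"] {
		ex.Disabled = disabled
		ex.ForceSendFields = append(ex.ForceSendFields, "Disabled")
		mask = append(mask, "disabled")
	}
	return ex, strings.Join(mask, ",")
}

func main() {
	_, mask := buildUpdate(map[string]bool{"filter": true, "disabled": true}, "", "severity>=ERROR", false)
	fmt.Println(mask) // filter,disabled
}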
- exclusion := logging.LogExclusion{} - - var updateMaskArr []string - - if d.HasChange("description") { - exclusion.Description = d.Get("description").(string) - exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Description") - updateMaskArr = append(updateMaskArr, "description") - } - - if d.HasChange("filter") { - exclusion.Filter = d.Get("filter").(string) - exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Filter") - updateMaskArr = append(updateMaskArr, "filter") - } - - if d.HasChange("disabled") { - exclusion.Disabled = d.Get("disabled").(bool) - exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Disabled") - updateMaskArr = append(updateMaskArr, "disabled") - } - - updateMask := strings.Join(updateMaskArr, ",") - return &exclusion, updateMask -} - -// The ResourceLoggingExclusionUpdater interface is implemented for each GCP -// resource supporting log exclusions. -// -// Implementations should keep track of the resource identifier. -type ResourceLoggingExclusionUpdater interface { - CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error - ReadLoggingExclusion(id string) (*logging.LogExclusion, error) - UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error - DeleteLoggingExclusion(id string) error - - GetResourceType() string - - // Returns the unique resource identifier. - GetResourceId() string - - // Textual description of this resource to be used in error message. - // The description should include the unique resource identifier. - DescribeResource() string -} - -type newResourceLoggingExclusionUpdaterFunc func(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) - -// loggingExclusionResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. -var loggingExclusionResourceTypes = []string{ - "billingAccounts", - "folders", - "organizations", - "projects", -} - -// LoggingExclusionId represents the parts that make up the canonical id used within terraform for a logging resource. -type LoggingExclusionId struct { - resourceType string - resourceId string - name string -} - -// loggingExclusionIdRegex matches valid logging exclusion canonical ids -var loggingExclusionIdRegex = regexp.MustCompile("(.+)/(.+)/exclusions/(.+)") - -// canonicalId returns the LoggingExclusionId as the canonical id used within terraform. -func (l LoggingExclusionId) canonicalId() string { - return fmt.Sprintf("%s/%s/exclusions/%s", l.resourceType, l.resourceId, l.name) -} - -// parent returns the "parent-level" resource that the exclusion is in (e.g. `folders/foo` for id `folders/foo/exclusions/bar`) -func (l LoggingExclusionId) parent() string { - return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) -} +import "github.com/hashicorp/terraform-provider-google/google/services/logging" // parseLoggingExclusionId parses a canonical id into a LoggingExclusionId, or returns an error on failure. 
-func parseLoggingExclusionId(id string) (*LoggingExclusionId, error) { - parts := loggingExclusionIdRegex.FindStringSubmatch(id) - if parts == nil { - return nil, fmt.Errorf("unable to parse logging exclusion id %#v", id) - } - // If our resourceType is not a valid logging exclusion resource type, complain loudly - validLoggingExclusionResourceType := false - for _, v := range loggingExclusionResourceTypes { - if v == parts[1] { - validLoggingExclusionResourceType = true - break - } - } - - if !validLoggingExclusionResourceType { - return nil, fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], - loggingExclusionResourceTypes) - } - return &LoggingExclusionId{ - resourceType: parts[1], - resourceId: parts[2], - name: parts[3], - }, nil +func parseLoggingExclusionId(id string) (*logging.LoggingExclusionId, error) { + return logging.ParseLoggingExclusionId(id) } diff --git a/google/resource_logging_folder_exclusion_test.go b/google/resource_logging_folder_exclusion_test.go index 7127d8a80d9..a012c9cec3a 100644 --- a/google/resource_logging_folder_exclusion_test.go +++ b/google/resource_logging_folder_exclusion_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/logging" ) // Logging exclusions don't always work when making parallel requests, so run tests serially @@ -64,14 +65,14 @@ func testAccLoggingFolderExclusion_folderAcceptsFullFolderPath(t *testing.T) { description := "Description " + RandString(t, 10) checkFn := func(s []*terraform.InstanceState) error { - loggingExclusionId, err := parseLoggingExclusionId(s[0].ID) + loggingExclusionId, err := logging.ParseLoggingExclusionId(s[0].ID) if err != nil { return err } folderAttribute := s[0].Attributes["folder"] - if loggingExclusionId.resourceId != folderAttribute { - return fmt.Errorf("imported folder id does not match: actual = %#v expected = %#v", folderAttribute, loggingExclusionId.resourceId) + if loggingExclusionId.ResourceId != folderAttribute { + return fmt.Errorf("imported folder id does not match: actual = %#v expected = %#v", folderAttribute, loggingExclusionId.ResourceId) } return nil diff --git a/google/resource_logging_project_sink_test.go b/google/resource_logging_project_sink_test.go index 424b998ef63..b1d17e3b4a7 100644 --- a/google/resource_logging_project_sink_test.go +++ b/google/resource_logging_project_sink_test.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" ) func TestAccLoggingProjectSink_basic(t *testing.T) { @@ -220,62 +219,6 @@ func TestAccLoggingProjectSink_loggingbucket(t *testing.T) { }) } -func TestLoggingProjectSink_bigqueryOptionCustomizedDiff(t *testing.T) { - t.Parallel() - - type LoggingProjectSink struct { - BigqueryOptions int - UniqueWriterIdentity bool - } - cases := map[string]struct { - ExpectedError bool - After LoggingProjectSink - }{ - "no biquery options with false unique writer identity": { - ExpectedError: false, - After: LoggingProjectSink{ - BigqueryOptions: 0, - UniqueWriterIdentity: false, - }, - }, - "no biquery options with true unique writer identity": { - ExpectedError: false, - After: 
LoggingProjectSink{ - BigqueryOptions: 0, - UniqueWriterIdentity: true, - }, - }, - "biquery options with false unique writer identity": { - ExpectedError: true, - After: LoggingProjectSink{ - BigqueryOptions: 1, - UniqueWriterIdentity: false, - }, - }, - "biquery options with true unique writer identity": { - ExpectedError: false, - After: LoggingProjectSink{ - BigqueryOptions: 1, - UniqueWriterIdentity: true, - }, - }, - } - - for tn, tc := range cases { - d := &tpgresource.ResourceDiffMock{ - After: map[string]interface{}{ - "bigquery_options.#": tc.After.BigqueryOptions, - "unique_writer_identity": tc.After.UniqueWriterIdentity, - }, - } - err := resourceLoggingProjectSinkCustomizeDiffFunc(d) - hasError := err != nil - if tc.ExpectedError != hasError { - t.Errorf("%v: expected has error %v, but was %v", tn, tc.ExpectedError, hasError) - } - } -} - func TestAccLoggingProjectSink_disabled_update(t *testing.T) { t.Parallel() diff --git a/google/resource_sql_database_instance_test.go b/google/resource_sql_database_instance_test.go index 161928e5dc8..0c32efe2e74 100644 --- a/google/resource_sql_database_instance_test.go +++ b/google/resource_sql_database_instance_test.go @@ -31,39 +31,6 @@ var ignoredReplicaConfigurationFields = []string{ "deletion_protection", } -func TestMaintenanceVersionDiffSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ShouldSuppress bool - }{ - "older configuration maintenance version than current version should suppress diff": { - Old: "MYSQL_8_0_26.R20220508.01_09", - New: "MYSQL_5_7_37.R20210508.01_03", - ShouldSuppress: true, - }, - "older configuration maintenance version than current version should suppress diff with lexicographically smaller database version": { - Old: "MYSQL_5_8_10.R20220508.01_09", - New: "MYSQL_5_8_7.R20210508.01_03", - ShouldSuppress: true, - }, - "newer configuration maintenance version than current version should not suppress diff": { - Old: "MYSQL_5_7_37.R20210508.01_03", - New: "MYSQL_8_0_26.R20220508.01_09", - ShouldSuppress: false, - }, - } - - for tn, tc := range cases { - tc := tc - t.Run(tn, func(t *testing.T) { - t.Parallel() - if maintenanceVersionDiffSuppress("version", tc.Old, tc.New, nil) != tc.ShouldSuppress { - t.Fatalf("%q => %q expect DiffSuppress to return %t", tc.Old, tc.New, tc.ShouldSuppress) - } - }) - } -} - func TestAccSqlDatabaseInstance_basicInferredName(t *testing.T) { // Randomness acctest.SkipIfVcr(t) diff --git a/google/resource_sql_database_test.go b/google/resource_sql_database_test.go index 7ce36e50c40..89d5a86d9da 100644 --- a/google/resource_sql_database_test.go +++ b/google/resource_sql_database_test.go @@ -13,40 +13,6 @@ import ( sqladmin "google.golang.org/api/sqladmin/v1beta4" ) -func TestCaseDiffDashSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "PD_HDD": { - Old: "PD_HDD", - New: "pd-hdd", - ExpectDiffSuppress: true, - }, - "PD_SSD": { - Old: "PD_SSD", - New: "pd-ssd", - ExpectDiffSuppress: true, - }, - "pd-hdd": { - Old: "pd-hdd", - New: "PD_HDD", - ExpectDiffSuppress: false, - }, - "pd-ssd": { - Old: "pd-ssd", - New: "PD_SSD", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if caseDiffDashSuppress(tn, tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Errorf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func TestAccSqlDatabase_basic(t *testing.T) { t.Parallel() diff --git a/google/resource_storage_bucket_acl.go 
b/google/resource_storage_bucket_acl.go index 1965dad2a75..79133918307 100644 --- a/google/resource_storage_bucket_acl.go +++ b/google/resource_storage_bucket_acl.go @@ -3,401 +3,9 @@ package google import ( - "context" - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "google.golang.org/api/storage/v1" + "github.com/hashicorp/terraform-provider-google/google/services/storage" ) -func ResourceStorageBucketAcl() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageBucketAclCreate, - Read: resourceStorageBucketAclRead, - Update: resourceStorageBucketAclUpdate, - Delete: resourceStorageBucketAclDelete, - CustomizeDiff: resourceStorageRoleEntityCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the bucket it applies to.`, - }, - - "default_acl": { - Type: schema.TypeString, - Optional: true, - Description: `Configure this ACL to be the default ACL.`, - }, - - "predefined_acl": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"role_entity"}, - Description: `The canned GCS ACL to apply. Must be set if role_entity is not.`, - }, - - "role_entity": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"predefined_acl"}, - Description: `List of role/entity pairs in the form ROLE:entity. See GCS Bucket ACL documentation for more details. Must be set if predefined_acl is not.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageRoleEntityCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - keys := diff.GetChangedKeysPrefix("role_entity") - if len(keys) < 1 { - return nil - } - count := diff.Get("role_entity.#").(int) - if count < 1 { - return nil - } - state := map[string]struct{}{} - conf := map[string]struct{}{} - for i := 0; i < count; i++ { - old, new := diff.GetChange(fmt.Sprintf("role_entity.%d", i)) - state[old.(string)] = struct{}{} - conf[new.(string)] = struct{}{} - } - if len(state) != len(conf) { - return nil - } - for k := range state { - if _, ok := conf[k]; !ok { - // project-owners- is explicitly stripped from the roles that this - // resource will delete - if strings.Contains(k, "OWNER:project-owners-") { - continue - } - return nil - } - } - return diff.Clear("role_entity") -} - -type RoleEntity struct { - Role string - Entity string -} - -func getBucketAclId(bucket string) string { - return bucket + "-acl" -} - -func getRoleEntityPair(role_entity string) (*RoleEntity, error) { - split := strings.Split(role_entity, ":") - if len(split) != 2 { - return nil, fmt.Errorf("Error, each role entity pair must be " + - "formatted as ROLE:entity") - } - - return &RoleEntity{Role: split[0], Entity: split[1]}, nil -} - -func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - predefined_acl := "" - default_acl := "" - role_entity := make([]interface{}, 0) - - if v, ok := d.GetOk("predefined_acl"); ok { - predefined_acl = v.(string) - } - - if v, ok := 
d.GetOk("role_entity"); ok { - role_entity = v.([]interface{}) - } - - if v, ok := d.GetOk("default_acl"); ok { - default_acl = v.(string) - } - - lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - transport_tpg.MutexStore.Lock(lockName) - defer transport_tpg.MutexStore.Unlock(lockName) - - if len(predefined_acl) > 0 { - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - - if err != nil { - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, - res).PredefinedAcl(predefined_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - } - - if len(role_entity) > 0 { - current, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() - if err != nil { - return fmt.Errorf("Error retrieving current ACLs: %s", err) - } - for _, v := range role_entity { - pair, err := getRoleEntityPair(v.(string)) - if err != nil { - return err - } - var alreadyInserted bool - for _, cur := range current.Items { - if cur.Entity == pair.Entity && cur.Role == pair.Role { - alreadyInserted = true - break - } - } - if alreadyInserted { - log.Printf("[DEBUG]: pair %s-%s already exists, not trying to insert again\n", pair.Role, pair.Entity) - continue - } - bucketAccessControl := &storage.BucketAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) - - _, err = config.NewStorageClient(userAgent).BucketAccessControls.Insert(bucket, bucketAccessControl).Do() - - if err != nil { - return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - } - - if len(default_acl) > 0 { - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - - if err != nil { - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, - res).PredefinedDefaultObjectAcl(default_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - } - - d.SetId(getBucketAclId(bucket)) - return resourceStorageBucketAclRead(d, meta) -} - -func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - // The API offers no way to retrieve predefined ACLs, - // and we can't tell which access controls were created - // by the predefined roles, so... - // - // This is, needless to say, a bad state of affairs and - // should be fixed. 
- if _, ok := d.GetOk("role_entity"); ok { - res, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() - - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) - } - entities := make([]string, 0, len(res.Items)) - for _, item := range res.Items { - entities = append(entities, item.Role+":"+item.Entity) - } - - if err := d.Set("role_entity", entities); err != nil { - return fmt.Errorf("Error setting role_entity: %s", err) - } - } else { - // if we don't set `role_entity` to nil (effectively setting it - // to empty in Terraform state), because it's computed now, - // Terraform will think it's missing from state, is supposed - // to be there, and throw up a diff for role_entity.#. So it - // must always be set in state. - if err := d.Set("role_entity", nil); err != nil { - return fmt.Errorf("Error setting role_entity: %s", err) - } - } - - return nil -} - -func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - transport_tpg.MutexStore.Lock(lockName) - defer transport_tpg.MutexStore.Unlock(lockName) - - if d.HasChange("role_entity") { - bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return fmt.Errorf("Error reading bucket %q: %v", bucket, err) - } - - project := strconv.FormatUint(bkt.ProjectNumber, 10) - o, n := d.GetChange("role_entity") - old_re, new_re := o.([]interface{}), n.([]interface{}) - - old_re_map := make(map[string]string) - for _, v := range old_re { - res, err := getRoleEntityPair(v.(string)) - - if err != nil { - return fmt.Errorf( - "Old state has malformed Role/Entity pair: %v", err) - } - - old_re_map[res.Entity] = res.Role - } - - for _, v := range new_re { - pair, err := getRoleEntityPair(v.(string)) - - bucketAccessControl := &storage.BucketAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - // If the old state entity's role doesn't match the new one, it needs to be inserted - if old_re_map[pair.Entity] != bucketAccessControl.Role { - _, err = config.NewStorageClient(userAgent).BucketAccessControls.Insert( - bucket, bucketAccessControl).Do() - } - - // Now we only store the keys that have to be removed - delete(old_re_map, pair.Entity) - - if err != nil { - return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - for entity, role := range old_re_map { - if entity == fmt.Sprintf("project-owners-%s", project) && role == "OWNER" { - log.Printf("[WARN]: Skipping %s-%s; not deleting owner ACL.", role, entity) - continue - } - log.Printf("[DEBUG]: removing entity %s", entity) - err := config.NewStorageClient(userAgent).BucketAccessControls.Delete(bucket, entity).Do() - - if err != nil { - return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - return resourceStorageBucketAclRead(d, meta) - } - - if d.HasChange("default_acl") { - default_acl := d.Get("default_acl").(string) - - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - - if err != nil { - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, - 
res).PredefinedDefaultObjectAcl(default_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - return resourceStorageBucketAclRead(d, meta) - } - - return nil -} - -func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - transport_tpg.MutexStore.Lock(lockName) - defer transport_tpg.MutexStore.Unlock(lockName) - - bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return fmt.Errorf("Error retrieving bucket %q: %v", bucket, err) - } - project := strconv.FormatUint(bkt.ProjectNumber, 10) - - re_local := d.Get("role_entity").([]interface{}) - for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) - if err != nil { - return err - } - - if res.Entity == fmt.Sprintf("project-owners-%s", project) && res.Role == "OWNER" { - log.Printf("[WARN]: Skipping %s-%s; not deleting owner ACL.", res.Role, res.Entity) - continue - } - - log.Printf("[DEBUG]: removing entity %s", res.Entity) - - err = config.NewStorageClient(userAgent).BucketAccessControls.Delete(bucket, res.Entity).Do() - - if err != nil { - return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) - } - } - - return nil +func getRoleEntityPair(role_entity string) (*storage.RoleEntity, error) { + return storage.GetRoleEntityPair(role_entity) } diff --git a/google/resource_storage_bucket_acl_test.go b/google/resource_storage_bucket_acl_test.go index 5d3a1ad6677..e1c5ad4b67f 100644 --- a/google/resource_storage_bucket_acl_test.go +++ b/google/resource_storage_bucket_acl_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/storage" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -213,7 +214,7 @@ func TestAccStorageBucketAcl_RemoveOwner(t *testing.T) { func testAccCheckGoogleStorageBucketAclDelete(t *testing.T, bucket, roleEntityS string) resource.TestCheckFunc { return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) + roleEntity, _ := storage.GetRoleEntityPair(roleEntityS) config := GoogleProviderConfig(t) _, err := config.NewStorageClient(config.UserAgent).BucketAccessControls.Get(bucket, roleEntity.Entity).Do() @@ -228,7 +229,7 @@ func testAccCheckGoogleStorageBucketAclDelete(t *testing.T, bucket, roleEntityS func testAccCheckGoogleStorageBucketAcl(t *testing.T, bucket, roleEntityS string) resource.TestCheckFunc { return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) + roleEntity, _ := storage.GetRoleEntityPair(roleEntityS) config := GoogleProviderConfig(t) res, err := config.NewStorageClient(config.UserAgent).BucketAccessControls.Get(bucket, roleEntity.Entity).Do() diff --git a/google/resource_storage_bucket_test.go b/google/resource_storage_bucket_test.go index d14a0d374cd..fc8fed9eae2 100644 --- a/google/resource_storage_bucket_test.go +++ b/google/resource_storage_bucket_test.go @@ -1129,85 +1129,6 @@ func TestAccStorageBucket_retentionPolicyLocked(t *testing.T) { }) } -func TestLabelDiffSuppress(t *testing.T) { - cases := 
map[string]struct { - K, Old, New string - ExpectDiffSuppress bool - }{ - "missing goog-dataplex-asset-id": { - K: "labels.goog-dataplex-asset-id", - Old: "test-bucket", - New: "", - ExpectDiffSuppress: true, - }, - "explicit goog-dataplex-asset-id": { - K: "labels.goog-dataplex-asset-id", - Old: "test-bucket", - New: "test-bucket-1", - ExpectDiffSuppress: false, - }, - "missing goog-dataplex-lake-id": { - K: "labels.goog-dataplex-lake-id", - Old: "test-lake", - New: "", - ExpectDiffSuppress: true, - }, - "explicit goog-dataplex-lake-id": { - K: "labels.goog-dataplex-lake-id", - Old: "test-lake", - New: "test-lake-1", - ExpectDiffSuppress: false, - }, - "missing goog-dataplex-project-id": { - K: "labels.goog-dataplex-project-id", - Old: "test-project-12345", - New: "", - ExpectDiffSuppress: true, - }, - "explicit goog-dataplex-project-id": { - K: "labels.goog-dataplex-project-id", - Old: "test-project-12345", - New: "test-project-12345-1", - ExpectDiffSuppress: false, - }, - "missing goog-dataplex-zone-id": { - K: "labels.goog-dataplex-zone-id", - Old: "test-zone1", - New: "", - ExpectDiffSuppress: true, - }, - "explicit goog-dataplex-zone-id": { - K: "labels.goog-dataplex-zone-id", - Old: "test-zone1", - New: "test-zone1-1", - ExpectDiffSuppress: false, - }, - "labels.%": { - K: "labels.%", - Old: "5", - New: "1", - ExpectDiffSuppress: true, - }, - "deleted custom key": { - K: "labels.my-label", - Old: "my-value", - New: "", - ExpectDiffSuppress: false, - }, - "added custom key": { - K: "labels.my-label", - Old: "", - New: "my-value", - ExpectDiffSuppress: false, - }, - } - for tn, tc := range cases { - if resourceDataplexLabelDiffSuppress(tc.K, tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Errorf("bad: %s, %q: %q => %q expect DiffSuppress to return %t", tn, tc.K, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func testAccCheckStorageBucketExists(t *testing.T, n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/google/resource_storage_default_object_acl_test.go b/google/resource_storage_default_object_acl_test.go index 4f86a313545..352af801fb9 100644 --- a/google/resource_storage_default_object_acl_test.go +++ b/google/resource_storage_default_object_acl_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/storage" ) func TestAccStorageDefaultObjectAcl_basic(t *testing.T) { @@ -143,7 +144,7 @@ func TestAccStorageDefaultObjectAcl_unordered(t *testing.T) { func testAccCheckGoogleStorageDefaultObjectAcl(t *testing.T, bucket, roleEntityS string) resource.TestCheckFunc { return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) + roleEntity, _ := storage.GetRoleEntityPair(roleEntityS) config := GoogleProviderConfig(t) res, err := config.NewStorageClient(config.UserAgent).DefaultObjectAccessControls.Get(bucket, @@ -184,7 +185,7 @@ func testAccStorageDefaultObjectAclDestroyProducer(t *testing.T) func(s *terrafo func testAccCheckGoogleStorageDefaultObjectAclDelete(t *testing.T, bucket, roleEntityS string) resource.TestCheckFunc { return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) + roleEntity, _ := storage.GetRoleEntityPair(roleEntityS) config := GoogleProviderConfig(t) _, 
err := config.NewStorageClient(config.UserAgent).DefaultObjectAccessControls.Get(bucket, roleEntity.Entity).Do() diff --git a/google/resource_storage_notification.go b/google/resource_storage_notification.go index f9e648b71db..2bbab65420f 100644 --- a/google/resource_storage_notification.go +++ b/google/resource_storage_notification.go @@ -3,196 +3,9 @@ package google import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-provider-google/google/services/pubsub" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/storage/v1" + "github.com/hashicorp/terraform-provider-google/google/services/storage" ) -func ResourceStorageNotification() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageNotificationCreate, - Read: resourceStorageNotificationRead, - Delete: resourceStorageNotificationDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the bucket.`, - }, - - "payload_format": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"JSON_API_V1", "NONE"}, false), - Description: `The desired content of the Payload. One of "JSON_API_V1" or "NONE".`, - }, - - "topic": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `The Cloud Pub/Sub topic to which this subscription publishes. Expects either the topic name, assumed to belong to the default GCP provider project, or the project-level name, i.e. projects/my-gcp-project/topics/my-topic or my-topic. If the project is not set in the provider, you will need to use the project-level name.`, - }, - - "custom_attributes": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: ` A set of key/value attribute pairs to attach to each Cloud Pub/Sub message published for this notification subscription`, - }, - - "event_types": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{ - "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"}, - false), - }, - Description: `List of event type filters for this notification config. If not specified, Cloud Storage will send notifications for all event types. The valid types are: "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"`, - }, - - "object_name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a prefix path filter for this notification config. 
Cloud Storage will only send notifications for objects in this bucket whose names begin with the specified prefix.`, - }, - - "notification_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID of the created notification.`, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageNotificationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - topicName := d.Get("topic").(string) - computedTopicName := pubsub.GetComputedTopicName("", topicName) - if computedTopicName != topicName { - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - computedTopicName = pubsub.GetComputedTopicName(project, topicName) - } - - storageNotification := &storage.Notification{ - CustomAttributes: tpgresource.ExpandStringMap(d, "custom_attributes"), - EventTypes: tpgresource.ConvertStringSet(d.Get("event_types").(*schema.Set)), - ObjectNamePrefix: d.Get("object_name_prefix").(string), - PayloadFormat: d.Get("payload_format").(string), - Topic: computedTopicName, - } - - res, err := config.NewStorageClient(userAgent).Notifications.Insert(bucket, storageNotification).Do() - if err != nil { - return fmt.Errorf("Error creating notification config for bucket %s: %v", bucket, err) - } - - d.SetId(fmt.Sprintf("%s/notificationConfigs/%s", bucket, res.Id)) - - return resourceStorageNotificationRead(d, meta) -} - -func resourceStorageNotificationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket, notificationID := resourceStorageNotificationParseID(d.Id()) - - res, err := config.NewStorageClient(userAgent).Notifications.Get(bucket, notificationID).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Notification configuration %s for bucket %s", notificationID, bucket)) - } - - if err := d.Set("bucket", bucket); err != nil { - return fmt.Errorf("Error setting bucket: %s", err) - } - if err := d.Set("payload_format", res.PayloadFormat); err != nil { - return fmt.Errorf("Error setting payload_format: %s", err) - } - if err := d.Set("topic", res.Topic); err != nil { - return fmt.Errorf("Error setting topic: %s", err) - } - if err := d.Set("object_name_prefix", res.ObjectNamePrefix); err != nil { - return fmt.Errorf("Error setting object_name_prefix: %s", err) - } - if err := d.Set("event_types", res.EventTypes); err != nil { - return fmt.Errorf("Error setting event_types: %s", err) - } - if err := d.Set("notification_id", notificationID); err != nil { - return fmt.Errorf("Error setting notification_id: %s", err) - } - if err := d.Set("self_link", res.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("custom_attributes", res.CustomAttributes); err != nil { - return fmt.Errorf("Error setting custom_attributes: %s", err) - } - - return nil -} - -func resourceStorageNotificationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket, 
notificationID := resourceStorageNotificationParseID(d.Id()) - - err = config.NewStorageClient(userAgent).Notifications.Delete(bucket, notificationID).Do() - if err != nil { - return fmt.Errorf("Error deleting notification configuration %s for bucket %s: %v", notificationID, bucket, err) - } - - return nil -} - func resourceStorageNotificationParseID(id string) (string, string) { - //bucket, NotificationID - parts := strings.Split(id, "/") - - return parts[0], parts[2] + return storage.ResourceStorageNotificationParseID(id) } diff --git a/google/resource_storage_notification_test.go b/google/resource_storage_notification_test.go index 8a168e3de91..a75ae192433 100644 --- a/google/resource_storage_notification_test.go +++ b/google/resource_storage_notification_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + tpgstorage "github.com/hashicorp/terraform-provider-google/google/services/storage" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -115,7 +116,7 @@ func testAccStorageNotificationDestroyProducer(t *testing.T) func(s *terraform.S continue } - bucket, notificationID := resourceStorageNotificationParseID(rs.Primary.ID) + bucket, notificationID := tpgstorage.ResourceStorageNotificationParseID(rs.Primary.ID) _, err := config.NewStorageClient(config.UserAgent).Notifications.Get(bucket, notificationID).Do() if err == nil { @@ -140,7 +141,7 @@ func testAccCheckStorageNotificationExists(t *testing.T, resource string, notifi config := GoogleProviderConfig(t) - bucket, notificationID := resourceStorageNotificationParseID(rs.Primary.ID) + bucket, notificationID := tpgstorage.ResourceStorageNotificationParseID(rs.Primary.ID) found, err := config.NewStorageClient(config.UserAgent).Notifications.Get(bucket, notificationID).Do() if err != nil { diff --git a/google/resource_storage_object_acl_test.go b/google/resource_storage_object_acl_test.go index 4380d450067..cbfdb7d3ff8 100644 --- a/google/resource_storage_object_acl_test.go +++ b/google/resource_storage_object_acl_test.go @@ -13,6 +13,7 @@ import ( "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/storage" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -373,7 +374,7 @@ func TestAccStorageObjectAcl_noOwner(t *testing.T) { func testAccCheckGoogleStorageObjectAcl(t *testing.T, bucket, object, roleEntityS string) resource.TestCheckFunc { return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) + roleEntity, _ := storage.GetRoleEntityPair(roleEntityS) config := GoogleProviderConfig(t) res, err := config.NewStorageClient(config.UserAgent).ObjectAccessControls.Get(bucket, @@ -393,7 +394,7 @@ func testAccCheckGoogleStorageObjectAcl(t *testing.T, bucket, object, roleEntity func testAccCheckGoogleStorageObjectAclDelete(t *testing.T, bucket, object, roleEntityS string) resource.TestCheckFunc { return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) + roleEntity, _ := storage.GetRoleEntityPair(roleEntityS) config := GoogleProviderConfig(t) _, err := config.NewStorageClient(config.UserAgent).ObjectAccessControls.Get(bucket, diff --git a/google/resource_usage_export_bucket.go b/google/services/compute/resource_usage_export_bucket.go similarity index 93% rename from 
google/resource_usage_export_bucket.go rename to google/services/compute/resource_usage_export_bucket.go index 3a8426a4ac5..48436da9fda 100644 --- a/google/resource_usage_export_bucket.go +++ b/google/services/compute/resource_usage_export_bucket.go @@ -1,13 +1,12 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package compute import ( "fmt" "log" "time" - tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" @@ -110,7 +109,7 @@ func resourceProjectUsageBucketCreate(d *schema.ResourceData, meta interface{}) return err } d.SetId(project) - err = tpgcompute.ComputeOperationWaitTime(config, op, project, "Setting usage export bucket.", userAgent, d.Timeout(schema.TimeoutCreate)) + err = ComputeOperationWaitTime(config, op, project, "Setting usage export bucket.", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { d.SetId("") return err @@ -140,7 +139,7 @@ func resourceProjectUsageBucketDelete(d *schema.ResourceData, meta interface{}) return err } - err = tpgcompute.ComputeOperationWaitTime(config, op, project, + err = ComputeOperationWaitTime(config, op, project, "Setting usage export bucket to nil, automatically disabling usage export.", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err diff --git a/google/resource_dataflow_flex_template_job.go b/google/services/dataflow/resource_dataflow_flex_template_job.go similarity index 80% rename from google/resource_dataflow_flex_template_job.go rename to google/services/dataflow/resource_dataflow_flex_template_job.go index 9821d4cf9b2..6ca5ed3366f 100644 --- a/google/resource_dataflow_flex_template_job.go +++ b/google/services/dataflow/resource_dataflow_flex_template_job.go @@ -1,3 +1,3 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package dataflow diff --git a/google/services/dataflow/resource_dataflow_job.go b/google/services/dataflow/resource_dataflow_job.go new file mode 100644 index 00000000000..1815fff0ec6 --- /dev/null +++ b/google/services/dataflow/resource_dataflow_job.go @@ -0,0 +1,668 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataflow + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + dataflow "google.golang.org/api/dataflow/v1b3" + "google.golang.org/api/googleapi" +) + +const resourceDataflowJobGoogleProvidedLabelPrefix = "labels.goog-dataflow-provided" + +var DataflowTerminatingStatesMap = map[string]struct{}{ + "JOB_STATE_CANCELLING": {}, + "JOB_STATE_DRAINING": {}, +} + +var DataflowTerminalStatesMap = map[string]struct{}{ + "JOB_STATE_DONE": {}, + "JOB_STATE_FAILED": {}, + "JOB_STATE_CANCELLED": {}, + "JOB_STATE_UPDATED": {}, + "JOB_STATE_DRAINED": {}, +} + +func resourceDataflowJobLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Example Diff: "labels.goog-dataflow-provided-template-version": "word_count" => "" + if strings.HasPrefix(k, resourceDataflowJobGoogleProvidedLabelPrefix) && new == "" { + // Suppress diff if field is a Google Dataflow-provided label key and has no explicitly set value in Config. + return true + } + + // Let diff be determined by labels (above) + if strings.HasPrefix(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +func ResourceDataflowJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDataflowJobCreate, + Read: resourceDataflowJobRead, + Update: resourceDataflowJobUpdateByReplacement, + Delete: resourceDataflowJobDelete, + Timeouts: &schema.ResourceTimeout{ + Update: schema.DefaultTimeout(10 * time.Minute), + }, + CustomizeDiff: customdiff.All( + resourceDataflowJobTypeCustomizeDiff, + ), + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `A unique name for the resource, required by Dataflow.`, + }, + + "template_gcs_path": { + Type: schema.TypeString, + Required: true, + Description: `The Google Cloud Storage path to the Dataflow job template.`, + }, + + "temp_gcs_location": { + Type: schema.TypeString, + Required: true, + Description: `A writeable location on Google Cloud Storage for the Dataflow job to dump its temporary data.`, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The zone in which the created job should run. If it is not provided, the provider zone is used.`, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The region in which the created job should run.`, + }, + + "max_workers": { + Type: schema.TypeInt, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The number of workers permitted to work on the job. 
More workers may improve processing speed at additional cost.`, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + Description: `Key/Value pairs to be passed to the Dataflow job (as used in the template).`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + DiffSuppressFunc: resourceDataflowJobLabelDiffSuppress, + Description: `User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: Google-provided Dataflow templates often provide default labels that begin with goog-dataflow-provided. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.`, + }, + + "transform_name_mapping": { + Type: schema.TypeMap, + Optional: true, + Description: `Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.`, + }, + + "on_delete": { + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false), + Optional: true, + Default: "drain", + Description: `One of "drain" or "cancel". Specifies behavior of deletion during terraform destroy.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The project in which the resource belongs.`, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the resource, selected from the JobState enum.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of this job, selected from the JobType enum.`, + }, + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Description: `The Service Account email used to create the job.`, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network to which VMs will be assigned. If it is not provided, "default" will be used.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: `The machine type to use for the job.`, + }, + + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`, + }, + + "ip_configuration": { + Type: schema.TypeString, + Optional: true, + Description: `The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".`, + ValidateFunc: validation.StringInSlice([]string{"WORKER_IP_PUBLIC", "WORKER_IP_PRIVATE", ""}, false), + }, + + "additional_experiments": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: `List of experiments that should be used by the job. 
An example value is ["enable_stackdriver_agent_metrics"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "job_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique ID of this job.`, + }, + + "enable_streaming_engine": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if the job should use the streaming engine feature.`, + }, + + "skip_wait_on_job_termination": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from terraform state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. by embedding a release ID or by using a random_id.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataflowJobTypeCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + // All non-virtual fields are ForceNew for batch jobs + if d.Get("type") == "JOB_TYPE_BATCH" { + resourceSchema := ResourceDataflowJob().Schema + for field := range resourceSchema { + if field == "on_delete" { + continue + } + // Labels map will likely have suppressed changes, so we check each key instead of the parent field + if field == "labels" { + if err := resourceDataflowJobIterateMapForceNew(field, d); err != nil { + return err + } + } else if d.HasChange(field) { + if err := d.ForceNew(field); err != nil { + return err + } + } + } + } + + return nil +} + +// return true if a job is in a terminal state, OR if a job is in a +// terminating state and skipWait is true +func shouldStopDataflowJobDeleteQuery(state string, skipWait bool) bool { + _, stopQuery := DataflowTerminalStatesMap[state] + if !stopQuery && skipWait { + _, stopQuery = DataflowTerminatingStatesMap[state] + } + return stopQuery +} + +func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + params := tpgresource.ExpandStringMap(d, "parameters") + + env, err := resourceDataflowJobSetupEnv(d, config) + if err != nil { + return err + } + + request := dataflow.CreateJobFromTemplateRequest{ + JobName: d.Get("name").(string), + GcsPath: d.Get("template_gcs_path").(string), + Parameters: params, + Environment: &env, + } + + job, err := resourceDataflowJobCreateJob(config, project, region, userAgent, &request) + if err != nil { + return err + } + d.SetId(job.Id) + + return resourceDataflowJobRead(d, meta) +} + +func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + + job, err := resourceDataflowJobGetJob(config, project, region, userAgent, id) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) + } + + if err := d.Set("job_id", job.Id); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } 
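+	// CurrentState feeds the terminal-state check at the end of this Read and the wait loop in Delete.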
+ if err := d.Set("state", job.CurrentState); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err := d.Set("name", job.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("type", job.Type); err != nil { + return fmt.Errorf("Error setting type: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("labels", job.Labels); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := d.Set("kms_key_name", job.Environment.ServiceKmsKeyName); err != nil { + return fmt.Errorf("Error setting kms_key_name: %s", err) + } + + sdkPipelineOptions, err := tpgresource.ConvertToMap(job.Environment.SdkPipelineOptions) + if err != nil { + return err + } + optionsMap := sdkPipelineOptions["options"].(map[string]interface{}) + if err := d.Set("template_gcs_path", optionsMap["templateLocation"]); err != nil { + return fmt.Errorf("Error setting template_gcs_path: %s", err) + } + if err := d.Set("temp_gcs_location", optionsMap["tempLocation"]); err != nil { + return fmt.Errorf("Error setting temp_gcs_location: %s", err) + } + if err := d.Set("machine_type", optionsMap["machineType"]); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + if err := d.Set("network", optionsMap["network"]); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("service_account_email", optionsMap["serviceAccountEmail"]); err != nil { + return fmt.Errorf("Error setting service_account_email: %s", err) + } + + if ok := shouldStopDataflowJobDeleteQuery(job.CurrentState, d.Get("skip_wait_on_job_termination").(bool)); ok { + log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState) + d.SetId("") + return nil + } + d.SetId(job.Id) + + return nil +} + +// Stream update method. 
Batch job changes should have been set to ForceNew via custom diff +func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interface{}) error { + // Don't send an update request if only virtual fields have changes + if resourceDataflowJobIsVirtualUpdate(d, ResourceDataflowJob().Schema) { + return nil + } + + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + params := tpgresource.ExpandStringMap(d, "parameters") + tnamemapping := tpgresource.ExpandStringMap(d, "transform_name_mapping") + + env, err := resourceDataflowJobSetupEnv(d, config) + if err != nil { + return err + } + + request := dataflow.LaunchTemplateParameters{ + JobName: d.Get("name").(string), + Parameters: params, + TransformNameMapping: tnamemapping, + Environment: &env, + Update: true, + } + + var response *dataflow.LaunchTemplateResponse + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (updateErr error) { + response, updateErr = resourceDataflowJobLaunchTemplate(config, project, region, userAgent, d.Get("template_gcs_path").(string), &request) + return updateErr + }, + Timeout: time.Minute * time.Duration(5), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsDataflowJobUpdateRetryableError}, + }) + if err != nil { + return err + } + + if err := waitForDataflowJobToBeUpdated(d, config, response.Job.Id, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("Error updating job with job ID %q: %v", d.Id(), err) + } + + d.SetId(response.Job.Id) + + return resourceDataflowJobRead(d, meta) +} + +func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + + requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string)) + if err != nil { + return err + } + + // Retry updating the state while the job is not ready to be canceled/drained. + err = resource.Retry(time.Minute*time.Duration(15), func() *resource.RetryError { + // To terminate a dataflow job, we update the job with a requested + // terminal state. + job := &dataflow.Job{ + RequestedState: requestedState, + } + + _, updateErr := resourceDataflowJobUpdateJob(config, project, region, userAgent, id, job) + if updateErr != nil { + gerr, isGoogleErr := updateErr.(*googleapi.Error) + if !isGoogleErr { + // If we have an error and it's not a google-specific error, we should go ahead and return. + return resource.NonRetryableError(updateErr) + } + + if strings.Contains(gerr.Message, "not yet ready for canceling") { + // Retry cancelling job if it's not ready. + // Sleep to avoid hitting update quota with repeated attempts. + time.Sleep(5 * time.Second) + return resource.RetryableError(updateErr) + } + + if strings.Contains(gerr.Message, "Job has terminated") { + // Job has already been terminated, skip. 
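+				// Returning nil ends the retry; Delete then falls through to the terminal-state wait below.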
+ return nil + } + } + + return nil + }) + if err != nil { + return err + } + + // Wait for state to reach terminal state (canceled/drained/done plus cancelling/draining if skipWait) + skipWait := d.Get("skip_wait_on_job_termination").(bool) + ok := shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) + for !ok { + log.Printf("[DEBUG] Waiting for job with job state %q to terminate...", d.Get("state").(string)) + time.Sleep(5 * time.Second) + + err = resourceDataflowJobRead(d, meta) + if err != nil { + return fmt.Errorf("Error while reading job to see if it was properly terminated: %v", err) + } + ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) + } + + // Only remove the job from state if it's actually successfully hit a final state. + if ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait); ok { + log.Printf("[DEBUG] Removing dataflow job with final state %q", d.Get("state").(string)) + d.SetId("") + return nil + } + return fmt.Errorf("Unable to cancel the dataflow job '%s' - final state was %q.", d.Id(), d.Get("state").(string)) +} + +func resourceDataflowJobMapRequestedState(policy string) (string, error) { + switch policy { + case "cancel": + return "JOB_STATE_CANCELLED", nil + case "drain": + return "JOB_STATE_DRAINING", nil + default: + return "", fmt.Errorf("Invalid `on_delete` policy: %s", policy) + } +} + +func resourceDataflowJobCreateJob(config *transport_tpg.Config, project, region, userAgent string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Templates.Create(project, request).Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Create(project, region, request).Do() +} + +func resourceDataflowJobGetJob(config *transport_tpg.Config, project, region, userAgent string, id string) (*dataflow.Job, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Jobs.Get(project, id).View("JOB_VIEW_ALL").Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Get(project, region, id).View("JOB_VIEW_ALL").Do() +} + +func resourceDataflowJobUpdateJob(config *transport_tpg.Config, project, region, userAgent string, id string, job *dataflow.Job) (*dataflow.Job, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Jobs.Update(project, id, job).Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Update(project, region, id, job).Do() +} + +func resourceDataflowJobLaunchTemplate(config *transport_tpg.Config, project, region, userAgent string, gcsPath string, request *dataflow.LaunchTemplateParameters) (*dataflow.LaunchTemplateResponse, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Templates.Launch(project, request).GcsPath(gcsPath).Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Launch(project, region, request).GcsPath(gcsPath).Do() +} + +func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *transport_tpg.Config) (dataflow.RuntimeEnvironment, error) { + zone, _ := tpgresource.GetZone(d, config) + + labels := tpgresource.ExpandStringMap(d, "labels") + + additionalExperiments := tpgresource.ConvertStringSet(d.Get("additional_experiments").(*schema.Set)) + + env := dataflow.RuntimeEnvironment{ + MaxWorkers: int64(d.Get("max_workers").(int)), + Network: d.Get("network").(string), + ServiceAccountEmail: 
d.Get("service_account_email").(string), + Subnetwork: d.Get("subnetwork").(string), + TempLocation: d.Get("temp_gcs_location").(string), + MachineType: d.Get("machine_type").(string), + KmsKeyName: d.Get("kms_key_name").(string), + IpConfiguration: d.Get("ip_configuration").(string), + EnableStreamingEngine: d.Get("enable_streaming_engine").(bool), + AdditionalUserLabels: labels, + Zone: zone, + AdditionalExperiments: additionalExperiments, + } + return env, nil +} + +func resourceDataflowJobIterateMapForceNew(mapKey string, d *schema.ResourceDiff) error { + obj := d.Get(mapKey).(map[string]interface{}) + for k := range obj { + entrySchemaKey := mapKey + "." + k + if d.HasChange(entrySchemaKey) { + // ForceNew must be called on the parent map to trigger + if err := d.ForceNew(mapKey); err != nil { + return err + } + break + } + } + return nil +} + +func resourceDataflowJobIterateMapHasChange(mapKey string, d *schema.ResourceData) bool { + obj := d.Get(mapKey).(map[string]interface{}) + for k := range obj { + entrySchemaKey := mapKey + "." + k + if d.HasChange(entrySchemaKey) { + return true + } + } + return false +} + +func resourceDataflowJobIsVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { + // on_delete is the only virtual field + if d.HasChange("on_delete") { + for field := range resourceSchema { + if field == "on_delete" { + continue + } + // Labels map will likely have suppressed changes, so we check each key instead of the parent field + if (field == "labels" && resourceDataflowJobIterateMapHasChange(field, d)) || + (field != "labels" && d.HasChange(field)) { + return false + } + } + // on_delete is changing, but nothing else + return true + } + + return false +} + +func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *transport_tpg.Config, replacementJobID, userAgent string, timeout time.Duration) error { + return resource.Retry(timeout, func() *resource.RetryError { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return resource.NonRetryableError(err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return resource.NonRetryableError(err) + } + + replacementJob, err := resourceDataflowJobGetJob(config, project, region, userAgent, replacementJobID) + if err != nil { + if transport_tpg.IsRetryableError(err, nil, nil) { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + state := replacementJob.CurrentState + switch state { + case "", "JOB_STATE_PENDING": + return resource.RetryableError(fmt.Errorf("the replacement job with ID %q has pending state %q.", replacementJobID, state)) + case "JOB_STATE_FAILED": + return resource.NonRetryableError(fmt.Errorf("the replacement job with ID %q failed with state %q.", replacementJobID, state)) + default: + log.Printf("[DEBUG] the replacement job with ID %q has state %q.", replacementJobID, state) + return nil + } + }) +} diff --git a/google/services/dataproc/iam_dataproc_cluster.go b/google/services/dataproc/iam_dataproc_cluster.go new file mode 100644 index 00000000000..33d1f063f8f --- /dev/null +++ b/google/services/dataproc/iam_dataproc_cluster.go @@ -0,0 +1,144 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/dataproc/v1" +) + +var IamDataprocClusterSchema = map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type DataprocClusterIamUpdater struct { + project string + region string + cluster string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewDataprocClusterUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + + return &DataprocClusterIamUpdater{ + project: project, + region: region, + cluster: d.Get("cluster").(string), + d: d, + Config: config, + }, nil +} + +func DataprocClusterIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseRegionalFieldValue("clusters", d.Id(), "project", "region", "zone", d, config, true) + if err != nil { + return err + } + + if err := d.Set("project", fv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", fv.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("cluster", fv.Name); err != nil { + return fmt.Errorf("Error setting cluster: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
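+	// i.e. projects/{project}/regions/{region}/clusters/{cluster}, matching GetResourceId below.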
+ d.SetId(fv.RelativeLink()) + return nil +} + +func (u *DataprocClusterIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + req := &dataproc.GetIamPolicyRequest{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.GetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *DataprocClusterIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + req := &dataproc.SetIamPolicyRequest{Policy: dataprocPolicy} + _, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.SetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocClusterIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/regions/%s/clusters/%s", u.project, u.region, u.cluster) +} + +func (u *DataprocClusterIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataproc-cluster-%s-%s-%s", u.project, u.region, u.cluster) +} + +func (u *DataprocClusterIamUpdater) DescribeResource() string { + return fmt.Sprintf("Dataproc Cluster %s/%s/%s", u.project, u.region, u.cluster) +} diff --git a/google/services/dataproc/iam_dataproc_job.go b/google/services/dataproc/iam_dataproc_job.go new file mode 100644 index 00000000000..1e3ff90ef44 --- /dev/null +++ b/google/services/dataproc/iam_dataproc_job.go @@ -0,0 +1,162 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/dataproc/v1" +) + +var IamDataprocJobSchema = map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type DataprocJobIamUpdater struct { + project string + region string + jobId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewDataprocJobUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + + return &DataprocJobIamUpdater{ + project: project, + region: region, + jobId: d.Get("job_id").(string), + d: d, + Config: config, + }, nil +} + +func DataprocJobIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseRegionalFieldValue("jobs", d.Id(), "project", "region", "zone", d, config, true) + if err != nil { + return err + } + + if err := d.Set("job_id", fv.Name); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } + if err := d.Set("project", fv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", fv.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
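+	// i.e. projects/{project}/regions/{region}/jobs/{job_id}, matching GetResourceId below.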
+	d.SetId(fv.RelativeLink())
+	return nil
+}
+
+func (u *DataprocJobIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) {
+	req := &dataproc.GetIamPolicyRequest{}
+
+	userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.GetIamPolicy(u.GetResourceId(), req).Do()
+	if err != nil {
+		return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err)
+	}
+
+	cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p)
+	if err != nil {
+		return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err)
+	}
+
+	return cloudResourcePolicy, nil
+}
+
+func (u *DataprocJobIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error {
+	dataprocPolicy, err := resourceManagerToDataprocPolicy(policy)
+	if err != nil {
+		return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err)
+	}
+
+	userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	req := &dataproc.SetIamPolicyRequest{Policy: dataprocPolicy}
+	_, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.SetIamPolicy(u.GetResourceId(), req).Do()
+	if err != nil {
+		return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err)
+	}
+
+	return nil
+}
+
+func (u *DataprocJobIamUpdater) GetResourceId() string {
+	return fmt.Sprintf("projects/%s/regions/%s/jobs/%s", u.project, u.region, u.jobId)
+}
+
+func (u *DataprocJobIamUpdater) GetMutexKey() string {
+	return fmt.Sprintf("iam-dataproc-job-%s-%s-%s", u.project, u.region, u.jobId)
+}
+
+func (u *DataprocJobIamUpdater) DescribeResource() string {
+	return fmt.Sprintf("Dataproc Job %s/%s/%s", u.project, u.region, u.jobId)
+}
+
+func resourceManagerToDataprocPolicy(p *cloudresourcemanager.Policy) (*dataproc.Policy, error) {
+	out := &dataproc.Policy{}
+	err := tpgresource.Convert(p, out)
+	if err != nil {
+		return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a dataproc policy: {{err}}", err)
+	}
+	return out, nil
+}
+
+func dataprocToResourceManagerPolicy(p *dataproc.Policy) (*cloudresourcemanager.Policy, error) {
+	out := &cloudresourcemanager.Policy{}
+	err := tpgresource.Convert(p, out)
+	if err != nil {
+		return nil, errwrap.Wrapf("Cannot convert a dataproc policy to a cloudresourcemanager policy: {{err}}", err)
+	}
+	return out, nil
+}
diff --git a/google/resource_dataproc_cluster.go b/google/services/dataproc/resource_dataproc_cluster.go
similarity index 99%
rename from google/resource_dataproc_cluster.go
rename to google/services/dataproc/resource_dataproc_cluster.go
index 3fe7ad7363d..2194cd2fd53 100644
--- a/google/resource_dataproc_cluster.go
+++ b/google/services/dataproc/resource_dataproc_cluster.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0 -package google +package dataproc import ( "errors" @@ -1345,7 +1345,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err d.SetId(fmt.Sprintf("projects/%s/regions/%s/clusters/%s", project, region, cluster.ClusterName)) // Wait until it's created - waitErr := dataprocClusterOperationWait(config, op, "creating Dataproc cluster", userAgent, d.Timeout(schema.TimeoutCreate)) + waitErr := DataprocClusterOperationWait(config, op, "creating Dataproc cluster", userAgent, d.Timeout(schema.TimeoutCreate)) if waitErr != nil { // The resource didn't actually create // Note that we do not remove the ID here - this resource tends to leave @@ -2043,7 +2043,7 @@ func resourceDataprocClusterUpdate(d *schema.ResourceData, meta interface{}) err } // Wait until it's updated - waitErr := dataprocClusterOperationWait(config, op, "updating Dataproc cluster ", userAgent, d.Timeout(schema.TimeoutUpdate)) + waitErr := DataprocClusterOperationWait(config, op, "updating Dataproc cluster ", userAgent, d.Timeout(schema.TimeoutUpdate)) if waitErr != nil { return waitErr } @@ -2566,7 +2566,7 @@ func resourceDataprocClusterDelete(d *schema.ResourceData, meta interface{}) err } // Wait until it's deleted - waitErr := dataprocClusterOperationWait(config, op, "deleting Dataproc cluster", userAgent, d.Timeout(schema.TimeoutDelete)) + waitErr := DataprocClusterOperationWait(config, op, "deleting Dataproc cluster", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr } diff --git a/google/services/dataproc/resource_dataproc_cluster_internal_test.go b/google/services/dataproc/resource_dataproc_cluster_internal_test.go new file mode 100644 index 00000000000..a72d22dada5 --- /dev/null +++ b/google/services/dataproc/resource_dataproc_cluster_internal_test.go @@ -0,0 +1,123 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+package dataproc
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestDataprocExtractInitTimeout(t *testing.T) {
+	t.Parallel()
+
+	actual, err := extractInitTimeout("500s")
+	expected := 500
+	if err != nil {
+		t.Fatalf("Expected %d, but got error %v", expected, err)
+	}
+	if actual != expected {
+		t.Fatalf("Expected %d, but got %d", expected, actual)
+	}
+}
+
+func TestDataprocExtractInitTimeout_nonSeconds(t *testing.T) {
+	t.Parallel()
+
+	actual, err := extractInitTimeout("5m")
+	expected := 300
+	if err != nil {
+		t.Fatalf("Expected %d, but got error %v", expected, err)
+	}
+	if actual != expected {
+		t.Fatalf("Expected %d, but got %d", expected, actual)
+	}
+}
+
+func TestDataprocExtractInitTimeout_empty(t *testing.T) {
+	t.Parallel()
+
+	_, err := extractInitTimeout("")
+	expected := "time: invalid duration"
+	if err == nil {
+		t.Fatalf("Expected an error containing '%s', but got none", expected)
+	}
+	if !strings.Contains(err.Error(), expected) {
+		t.Fatalf("Expected an error containing '%s', but got %v", expected, err)
+	}
+}
+
+func TestDataprocParseImageVersion(t *testing.T) {
+	t.Parallel()
+
+	testCases := map[string]dataprocImageVersion{
+		"1.2":             {"1", "2", "", ""},
+		"1.2.3":           {"1", "2", "3", ""},
+		"1.2.3rc":         {"1", "2", "3rc", ""},
+		"1.2-debian9":     {"1", "2", "", "debian9"},
+		"1.2.3-debian9":   {"1", "2", "3", "debian9"},
+		"1.2.3rc-debian9": {"1", "2", "3rc", "debian9"},
+	}
+
+	for v, expected := range testCases {
+		actual, err := parseDataprocImageVersion(v)
+		if err != nil {
+			t.Errorf("parsing version %q returned error: %v", v, err)
+			continue
+		}
+		if actual.major != expected.major {
+			t.Errorf("parsing version %q: expected major %q, got %q", v, expected.major, actual.major)
+		}
+		if actual.minor != expected.minor {
+			t.Errorf("parsing version %q: expected minor %q, got %q", v, expected.minor, actual.minor)
+		}
+		if actual.subminor != expected.subminor {
+			t.Errorf("parsing version %q: expected subminor %q, got %q", v, expected.subminor, actual.subminor)
+		}
+		if actual.osName != expected.osName {
+			t.Errorf("parsing version %q: expected osName %q, got %q", v, expected.osName, actual.osName)
+		}
+	}
+
+	errorTestCases := []string{
+		"",
+		"1",
+		"notaversion",
+		"1-debian",
+	}
+	for _, v := range errorTestCases {
+		if _, err := parseDataprocImageVersion(v); err == nil {
+			t.Errorf("expected parsing invalid version %q to return error", v)
+		}
+	}
+}
+
+func TestDataprocDiffSuppress(t *testing.T) {
+	t.Parallel()
+
+	doSuppress := [][]string{
+		{"1.3.10-debian9", "1.3"},
+		{"1.3.10-debian9", "1.3-debian9"},
+		{"1.3.10", "1.3"},
+		{"1.3-debian9", "1.3"},
+	}
+
+	noSuppress := [][]string{
+		{"1.3.10-debian9", "1.3.10-ubuntu"},
+		{"1.3.10-debian9", "1.3.9-debian9"},
+		{"1.3.10-debian9", "1.3-ubuntu"},
+		{"1.3.10-debian9", "1.3.9"},
+		{"1.3.10-debian9", "1.4"},
+		{"1.3.10-debian9", "2.3"},
+		{"1.3.10", "1.3.10-debian9"},
+		{"1.3", "1.3.10"},
+		{"1.3", "1.3.10-debian9"},
+		{"1.3", "1.3-debian9"},
+	}
+
+	for _, tup := range doSuppress {
+		if !dataprocImageVersionDiffSuppress("", tup[0], tup[1], nil) {
+			t.Errorf("expected (old: %q, new: %q) to be suppressed", tup[0], tup[1])
+		}
+	}
+	for _, tup := range noSuppress {
+		if dataprocImageVersionDiffSuppress("", tup[0], tup[1], nil) {
+			t.Errorf("expected (old: %q, new: %q) to not be suppressed", tup[0], tup[1])
+		}
+	}
+}
diff --git a/google/resource_dataproc_job.go b/google/services/dataproc/resource_dataproc_job.go
similarity index 97%
rename from google/resource_dataproc_job.go
rename to google/services/dataproc/resource_dataproc_job.go
index 46e136d8326..095d1c167be 100644
--- a/google/resource_dataproc_job.go
+++ b/google/services/dataproc/resource_dataproc_job.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0 -package google +package dataproc import ( "fmt" @@ -227,7 +227,7 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("scheduling"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.Scheduling = expandJobScheduling(config) } @@ -236,37 +236,37 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("pyspark_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.PysparkJob = expandPySparkJob(config) } if v, ok := d.GetOk("spark_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkJob = expandSparkJob(config) } if v, ok := d.GetOk("hadoop_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.HadoopJob = expandHadoopJob(config) } if v, ok := d.GetOk("hive_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.HiveJob = expandHiveJob(config) } if v, ok := d.GetOk("pig_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.PigJob = expandPigJob(config) } if v, ok := d.GetOk("sparksql_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) } if v, ok := d.GetOk("presto_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.PrestoJob = expandPrestoJob(config) } @@ -278,7 +278,7 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } d.SetId(fmt.Sprintf("projects/%s/regions/%s/jobs/%s", project, region, job.Reference.JobId)) - waitErr := dataprocJobOperationWait(config, region, project, job.Reference.JobId, + waitErr := DataprocJobOperationWait(config, region, project, job.Reference.JobId, "Creating Dataproc job", userAgent, d.Timeout(schema.TimeoutCreate)) if waitErr != nil { return waitErr @@ -402,7 +402,7 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { // at least not active _, _ = config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Cancel(project, region, jobId, &dataproc.CancelJobRequest{}).Do() - waitErr := dataprocJobOperationWait(config, region, project, jobId, + waitErr := DataprocJobOperationWait(config, region, project, jobId, "Cancelling Dataproc job", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr @@ -417,7 +417,7 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { return err } - waitErr := dataprocDeleteOperationWait(config, region, project, jobId, + waitErr := DataprocDeleteOperationWait(config, region, project, jobId, "Deleting Dataproc job", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr @@ -558,7 +558,7 @@ func expandPySparkJob(config map[string]interface{}) *dataproc.PySparkJob { job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["logging_config"]; ok { - config := 
extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) job.LoggingConfig = expandLoggingConfig(config) } @@ -690,7 +690,7 @@ func expandSparkJob(config map[string]interface{}) *dataproc.SparkJob { job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) job.LoggingConfig = expandLoggingConfig(config) } @@ -811,7 +811,7 @@ func expandHadoopJob(config map[string]interface{}) *dataproc.HadoopJob { job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) job.LoggingConfig = expandLoggingConfig(config) } diff --git a/google/resource_dialogflow_cx_environment.go b/google/services/dialogflowcx/resource_dialogflow_cx_environment.go similarity index 99% rename from google/resource_dialogflow_cx_environment.go rename to google/services/dialogflowcx/resource_dialogflow_cx_environment.go index d40867e5e19..820df76ab11 100644 --- a/google/resource_dialogflow_cx_environment.go +++ b/google/services/dialogflowcx/resource_dialogflow_cx_environment.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package dialogflowcx import ( "fmt" diff --git a/google/resource_dialogflow_cx_version.go b/google/services/dialogflowcx/resource_dialogflow_cx_version.go similarity index 99% rename from google/resource_dialogflow_cx_version.go rename to google/services/dialogflowcx/resource_dialogflow_cx_version.go index b4f818f144f..57fdc6ac7b1 100644 --- a/google/resource_dialogflow_cx_version.go +++ b/google/services/dialogflowcx/resource_dialogflow_cx_version.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package dialogflowcx import ( "fmt" diff --git a/google/data_source_google_kms_secret_asymmetric.go b/google/services/kms/data_source_google_kms_secret_asymmetric.go similarity index 82% rename from google/data_source_google_kms_secret_asymmetric.go rename to google/services/kms/data_source_google_kms_secret_asymmetric.go index 9821d4cf9b2..3d831111e11 100644 --- a/google/data_source_google_kms_secret_asymmetric.go +++ b/google/services/kms/data_source_google_kms_secret_asymmetric.go @@ -1,3 +1,3 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package kms diff --git a/google/data_source_google_logging_project_cmek_settings.go b/google/services/logging/data_source_google_logging_project_cmek_settings.go similarity index 99% rename from google/data_source_google_logging_project_cmek_settings.go rename to google/services/logging/data_source_google_logging_project_cmek_settings.go index 39cf37526b8..e79d4dd1c83 100644 --- a/google/data_source_google_logging_project_cmek_settings.go +++ b/google/services/logging/data_source_google_logging_project_cmek_settings.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
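The recurring change in this file swaps the package-local extractFirstMapConfig for the shared tpgresource.ExtractFirstMapConfig. The pattern both serve: a MaxItems: 1 block arrives from the Terraform schema as a one-element []interface{}, and the helper unwraps that element as a map. A minimal stand-in showing the presumed behavior (the real helper lives in the tpgresource package and may differ in detail):

package main

import "fmt"

// extractFirstMapConfig is a hypothetical stand-in for
// tpgresource.ExtractFirstMapConfig, written only to illustrate the
// presumed behavior: unwrap the first element of a one-element block list.
func extractFirstMapConfig(configs []interface{}) map[string]interface{} {
	if len(configs) > 0 && configs[0] != nil {
		return configs[0].(map[string]interface{})
	}
	return map[string]interface{}{}
}

func main() {
	// A MaxItems: 1 block arrives from the schema as a one-element list.
	raw := []interface{}{map[string]interface{}{"driver_log_levels": "DEBUG"}}
	fmt.Println(extractFirstMapConfig(raw)["driver_log_levels"]) // DEBUG
}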
// SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/data_source_google_logging_sink.go b/google/services/logging/data_source_google_logging_sink.go similarity index 98% rename from google/data_source_google_logging_sink.go rename to google/services/logging/data_source_google_logging_sink.go index 58b95651b3b..0315bdd6e7e 100644 --- a/google/data_source_google_logging_sink.go +++ b/google/services/logging/data_source_google_logging_sink.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/services/logging/logging_exclusion_billing_account.go b/google/services/logging/logging_exclusion_billing_account.go new file mode 100644 index 00000000000..abe76cee895 --- /dev/null +++ b/google/services/logging/logging_exclusion_billing_account.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/logging/v2" +) + +var BillingAccountLoggingExclusionSchema = map[string]*schema.Schema{ + "billing_account": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type BillingAccountLoggingExclusionUpdater struct { + resourceType string + resourceId string + userAgent string + Config *transport_tpg.Config +} + +func NewBillingAccountLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { + billingAccount := d.Get("billing_account").(string) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + return &BillingAccountLoggingExclusionUpdater{ + resourceType: "billingAccounts", + resourceId: billingAccount, + userAgent: userAgent, + Config: config, + }, nil +} + +func BillingAccountLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) + if err != nil { + return err + } + + if "billingAccounts" != loggingExclusionId.resourceType { + return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) + } + + if err := d.Set("billing_account", loggingExclusionId.ResourceId); err != nil { + return fmt.Errorf("Error setting billing_account: %s", err) + } + return nil +} + +func (u *BillingAccountLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { + _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Create(parent, exclusion).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BillingAccountLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { + exclusion, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Get(id).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return exclusion, nil +} + +func (u *BillingAccountLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion 
*logging.LogExclusion, updateMask string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BillingAccountLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Delete(id).Do() + if err != nil { + return errwrap.Wrap(fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) + } + + return nil +} + +func (u *BillingAccountLoggingExclusionUpdater) GetResourceType() string { + return u.resourceType +} + +func (u *BillingAccountLoggingExclusionUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *BillingAccountLoggingExclusionUpdater) DescribeResource() string { + return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) +} diff --git a/google/services/logging/logging_exclusion_folder.go b/google/services/logging/logging_exclusion_folder.go new file mode 100644 index 00000000000..58d9dfcb2f2 --- /dev/null +++ b/google/services/logging/logging_exclusion_folder.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/logging/v2" +) + +var FolderLoggingExclusionSchema = map[string]*schema.Schema{ + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.OptionalPrefixSuppress("folders/"), + }, +} + +type FolderLoggingExclusionUpdater struct { + resourceType string + resourceId string + userAgent string + Config *transport_tpg.Config +} + +func NewFolderLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { + folder := resourcemanager.ParseFolderId(d.Get("folder")) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + return &FolderLoggingExclusionUpdater{ + resourceType: "folders", + resourceId: folder, + userAgent: userAgent, + Config: config, + }, nil +} + +func FolderLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) + if err != nil { + return err + } + + if "folders" != loggingExclusionId.resourceType { + return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) + } + + if err := d.Set("folder", loggingExclusionId.ResourceId); err != nil { + return fmt.Errorf("Error setting folder: %s", err) + } + return nil +} + +func (u *FolderLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Create(parent, exclusion).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *FolderLoggingExclusionUpdater) ReadLoggingExclusion(id string) 
(*logging.LogExclusion, error) { + exclusion, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Get(id).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return exclusion, nil +} + +func (u *FolderLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *FolderLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Delete(id).Do() + if err != nil { + return errwrap.Wrap(fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) + } + + return nil +} + +func (u *FolderLoggingExclusionUpdater) GetResourceType() string { + return u.resourceType +} + +func (u *FolderLoggingExclusionUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *FolderLoggingExclusionUpdater) DescribeResource() string { + return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) +} diff --git a/google/services/logging/logging_exclusion_organization.go b/google/services/logging/logging_exclusion_organization.go new file mode 100644 index 00000000000..938ce6aa7bd --- /dev/null +++ b/google/services/logging/logging_exclusion_organization.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/logging/v2" +) + +var OrganizationLoggingExclusionSchema = map[string]*schema.Schema{ + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type OrganizationLoggingExclusionUpdater struct { + resourceType string + resourceId string + userAgent string + Config *transport_tpg.Config +} + +func NewOrganizationLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { + organization := d.Get("org_id").(string) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + return &OrganizationLoggingExclusionUpdater{ + resourceType: "organizations", + resourceId: organization, + userAgent: userAgent, + Config: config, + }, nil +} + +func OrganizationLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) + if err != nil { + return err + } + + if "organizations" != loggingExclusionId.resourceType { + return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) + } + + if err := d.Set("org_id", loggingExclusionId.ResourceId); err != nil { + return fmt.Errorf("Error setting org_id: %s", err) + } + return nil +} + +func (u *OrganizationLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Create(parent, 
exclusion).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *OrganizationLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { + exclusion, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Get(id).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return exclusion, nil +} + +func (u *OrganizationLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *OrganizationLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Delete(id).Do() + if err != nil { + return errwrap.Wrap(fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) + } + + return nil +} + +func (u *OrganizationLoggingExclusionUpdater) GetResourceType() string { + return u.resourceType +} + +func (u *OrganizationLoggingExclusionUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *OrganizationLoggingExclusionUpdater) DescribeResource() string { + return fmt.Sprintf("%q %q", u.resourceType, u.resourceId) +} diff --git a/google/services/logging/logging_exclusion_project.go b/google/services/logging/logging_exclusion_project.go new file mode 100644 index 00000000000..2674e3fef59 --- /dev/null +++ b/google/services/logging/logging_exclusion_project.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/logging/v2" +) + +var ProjectLoggingExclusionSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type ProjectLoggingExclusionUpdater struct { + resourceType string + resourceId string + userAgent string + Config *transport_tpg.Config +} + +func NewProjectLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { + pid, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + return &ProjectLoggingExclusionUpdater{ + resourceType: "projects", + resourceId: pid, + userAgent: userAgent, + Config: config, + }, nil +} + +func ProjectLoggingExclusionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) + if err != nil { + return err + } + + if "projects" != loggingExclusionId.resourceType { + return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) + } + + if config.Project != loggingExclusionId.ResourceId { + if err := d.Set("project", loggingExclusionId.ResourceId); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + } + + return nil +} + +func (u *ProjectLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Create(parent, exclusion).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ProjectLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) { + exclusion, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Get(id).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return exclusion, nil +} + +func (u *ProjectLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ProjectLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { + _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Delete(id).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error deleting logging exclusion for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ProjectLoggingExclusionUpdater) GetResourceType() string { + return u.resourceType +} + +func (u *ProjectLoggingExclusionUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *ProjectLoggingExclusionUpdater) DescribeResource() string { + return fmt.Sprintf("%q %q", 
u.resourceType, u.resourceId) +} diff --git a/google/services/logging/logging_utils.go b/google/services/logging/logging_utils.go new file mode 100644 index 00000000000..7220ee8f22a --- /dev/null +++ b/google/services/logging/logging_utils.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + "regexp" +) + +// LoggingSinkResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. +var LoggingSinkResourceTypes = []string{ + "billingAccounts", + "folders", + "organizations", + "projects", +} + +// LoggingSinkId represents the parts that make up the canonical id used within terraform for a logging resource. +type LoggingSinkId struct { + resourceType string + resourceId string + name string +} + +// loggingSinkIdRegex matches valid logging sink canonical ids +var loggingSinkIdRegex = regexp.MustCompile("(.+)/(.+)/sinks/(.+)") + +// canonicalId returns the LoggingSinkId as the canonical id used within terraform. +func (l LoggingSinkId) canonicalId() string { + return fmt.Sprintf("%s/%s/sinks/%s", l.resourceType, l.resourceId, l.name) +} + +// parent returns the "parent-level" resource that the sink is in (e.g. `folders/foo` for id `folders/foo/sinks/bar`) +func (l LoggingSinkId) parent() string { + return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) +} + +// ParseLoggingSinkId parses a canonical id into a LoggingSinkId, or returns an error on failure. +func ParseLoggingSinkId(id string) (*LoggingSinkId, error) { + parts := loggingSinkIdRegex.FindStringSubmatch(id) + if parts == nil { + return nil, fmt.Errorf("unable to parse logging sink id %#v", id) + } + // If our resourceType is not a valid logging sink resource type, complain loudly + validLoggingSinkResourceType := false + for _, v := range LoggingSinkResourceTypes { + if v == parts[1] { + validLoggingSinkResourceType = true + break + } + } + + if !validLoggingSinkResourceType { + return nil, fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], + LoggingSinkResourceTypes) + } + return &LoggingSinkId{ + resourceType: parts[1], + resourceId: parts[2], + name: parts[3], + }, nil +} diff --git a/google/logging_utils_test.go b/google/services/logging/logging_utils_test.go similarity index 96% rename from google/logging_utils_test.go rename to google/services/logging/logging_utils_test.go index 11b00f8f86b..3540d7e82dd 100644 --- a/google/logging_utils_test.go +++ b/google/services/logging/logging_utils_test.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import "testing" @@ -16,7 +16,7 @@ func TestParseLoggingSinkId(t *testing.T) { } for _, test := range tests { - out, err := parseLoggingSinkId(test.val) + out, err := ParseLoggingSinkId(test.val) if err != nil { if !test.errExpected { t.Errorf("Got error with val %#v: error = %#v", test.val, err) diff --git a/google/resource_logging_billing_account_bucket_config.go b/google/services/logging/resource_logging_billing_account_bucket_config.go similarity index 98% rename from google/resource_logging_billing_account_bucket_config.go rename to google/services/logging/resource_logging_billing_account_bucket_config.go index ea5bd492ee7..ea34ad2f3c2 100644 --- a/google/resource_logging_billing_account_bucket_config.go +++ b/google/services/logging/resource_logging_billing_account_bucket_config.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
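logging_utils.go above now exports ParseLoggingSinkId so callers outside the new logging package can keep parsing canonical sink ids of the form {resourceType}/{resourceId}/sinks/{name}. A small usage sketch relying only on the exported function and the error text shown above:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-provider-google/google/services/logging"
)

func main() {
	// A well-formed canonical id parses cleanly.
	if _, err := logging.ParseLoggingSinkId("projects/my-project/sinks/my-sink"); err != nil {
		fmt.Println("unexpected error:", err)
	}
	// An unknown parent type is rejected:
	// "Logging resource type teams is not valid. ..."
	if _, err := logging.ParseLoggingSinkId("teams/my-team/sinks/my-sink"); err != nil {
		fmt.Println(err)
	}
}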
// SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_billing_account_sink.go b/google/services/logging/resource_logging_billing_account_sink.go similarity index 99% rename from google/resource_logging_billing_account_sink.go rename to google/services/logging/resource_logging_billing_account_sink.go index 9ea0aec88ea..818a2e29675 100644 --- a/google/resource_logging_billing_account_sink.go +++ b/google/services/logging/resource_logging_billing_account_sink.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_bucket_config.go b/google/services/logging/resource_logging_bucket_config.go similarity index 98% rename from google/resource_logging_bucket_config.go rename to google/services/logging/resource_logging_bucket_config.go index 2608e1a7d25..683e8d1684d 100644 --- a/google/resource_logging_bucket_config.go +++ b/google/services/logging/resource_logging_bucket_config.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" @@ -120,11 +120,11 @@ func resourceLoggingBucketConfigImportState(parent string) schema.StateFunc { } if len(parts) != 5 { - return nil, fmt.Errorf("Invalid id format. Format should be '{{parent}}/{{parent_id}}/locations/{{location}}/buckets/{{bucket_id}} with parent in %s", loggingSinkResourceTypes) + return nil, fmt.Errorf("Invalid id format. Format should be '{{parent}}/{{parent_id}}/locations/{{location}}/buckets/{{bucket_id}} with parent in %s", LoggingSinkResourceTypes) } validLoggingType := false - for _, v := range loggingSinkResourceTypes { + for _, v := range LoggingSinkResourceTypes { if v == parts[1] { validLoggingType = true break @@ -132,7 +132,7 @@ func resourceLoggingBucketConfigImportState(parent string) schema.StateFunc { } if !validLoggingType { return nil, fmt.Errorf("Logging parent type %s is not valid. Valid resource types: %#v", parts[1], - loggingSinkResourceTypes) + LoggingSinkResourceTypes) } if err := d.Set(parent, parts[1]+"/"+parts[2]); err != nil { diff --git a/google/services/logging/resource_logging_exclusion.go b/google/services/logging/resource_logging_exclusion.go new file mode 100644 index 00000000000..bd6a508f836 --- /dev/null +++ b/google/services/logging/resource_logging_exclusion.go @@ -0,0 +1,307 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/logging/v2" +) + +var LoggingExclusionBaseSchema = map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `The filter to apply when excluding logs. Only log entries that match the filter are excluded.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the logging exclusion.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether this exclusion rule should be disabled or not. 
This defaults to false.`, + }, +} + +func ResourceLoggingExclusion(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceLoggingExclusionUpdaterFunc, resourceIdParser tpgiamresource.ResourceIdParserFunc) *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingExclusionCreate(newUpdaterFunc), + Read: resourceLoggingExclusionRead(newUpdaterFunc), + Update: resourceLoggingExclusionUpdate(newUpdaterFunc), + Delete: resourceLoggingExclusionDelete(newUpdaterFunc), + + Importer: &schema.ResourceImporter{ + State: resourceLoggingExclusionImportState(resourceIdParser), + }, + + Schema: tpgresource.MergeSchemas(LoggingExclusionBaseSchema, parentSpecificSchema), + UseJSONNumber: true, + } +} + +func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.CreateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + id, exclusion := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) + + // Logging exclusions don't seem to be able to be mutated in parallel, see + // https://github.com/hashicorp/terraform-provider-google/issues/4796 + transport_tpg.MutexStore.Lock(id.parent()) + defer transport_tpg.MutexStore.Unlock(id.parent()) + + err = updater.CreateLoggingExclusion(id.parent(), exclusion) + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + + return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) + } +} + +func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + exclusion, err := updater.ReadLoggingExclusion(d.Id()) + + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Logging Exclusion %s", d.Get("name").(string))) + } + + if err := flattenResourceLoggingExclusion(d, exclusion); err != nil { + return err + } + + if updater.GetResourceType() == "projects" { + if err := d.Set("project", updater.GetResourceId()); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + } + + return nil + } +} + +func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.UpdateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) + exclusion, updateMask := expandResourceLoggingExclusionForUpdate(d) + + // Logging exclusions don't seem to be able to be mutated in parallel, see + // https://github.com/hashicorp/terraform-provider-google/issues/4796 + transport_tpg.MutexStore.Lock(id.parent()) + defer transport_tpg.MutexStore.Unlock(id.parent()) + + err = updater.UpdateLoggingExclusion(d.Id(), exclusion, updateMask) + if err != nil { + return err + } + + return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) + } +} + +func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.DeleteFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } 
+ + id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) + // Logging exclusions don't seem to be able to be mutated in parallel, see + // https://github.com/hashicorp/terraform-provider-google/issues/4796 + transport_tpg.MutexStore.Lock(id.parent()) + defer transport_tpg.MutexStore.Unlock(id.parent()) + + err = updater.DeleteLoggingExclusion(d.Id()) + if err != nil { + return err + } + + d.SetId("") + return nil + } +} + +func resourceLoggingExclusionImportState(resourceIdParser tpgiamresource.ResourceIdParserFunc) schema.StateFunc { + return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + err := resourceIdParser(d, config) + if err != nil { + return nil, err + } + return []*schema.ResourceData{d}, nil + } +} + +func expandResourceLoggingExclusion(d *schema.ResourceData, resourceType, ResourceId string) (LoggingExclusionId, *logging.LogExclusion) { + id := LoggingExclusionId{ + resourceType: resourceType, + ResourceId: ResourceId, + name: d.Get("name").(string), + } + + exclusion := logging.LogExclusion{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Filter: d.Get("filter").(string), + Disabled: d.Get("disabled").(bool), + } + return id, &exclusion +} + +func flattenResourceLoggingExclusion(d *schema.ResourceData, exclusion *logging.LogExclusion) error { + if err := d.Set("name", exclusion.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("description", exclusion.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("filter", exclusion.Filter); err != nil { + return fmt.Errorf("Error setting filter: %s", err) + } + if err := d.Set("disabled", exclusion.Disabled); err != nil { + return fmt.Errorf("Error setting disabled: %s", err) + } + + return nil +} + +func expandResourceLoggingExclusionForUpdate(d *schema.ResourceData) (*logging.LogExclusion, string) { + // Can update description/filter/disabled right now. + exclusion := logging.LogExclusion{} + + var updateMaskArr []string + + if d.HasChange("description") { + exclusion.Description = d.Get("description").(string) + exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Description") + updateMaskArr = append(updateMaskArr, "description") + } + + if d.HasChange("filter") { + exclusion.Filter = d.Get("filter").(string) + exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Filter") + updateMaskArr = append(updateMaskArr, "filter") + } + + if d.HasChange("disabled") { + exclusion.Disabled = d.Get("disabled").(bool) + exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Disabled") + updateMaskArr = append(updateMaskArr, "disabled") + } + + updateMask := strings.Join(updateMaskArr, ",") + return &exclusion, updateMask +} + +// The ResourceLoggingExclusionUpdater interface is implemented for each GCP +// resource supporting log exclusions. +// +// Implementations should keep track of the resource identifier. +type ResourceLoggingExclusionUpdater interface { + CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error + ReadLoggingExclusion(id string) (*logging.LogExclusion, error) + UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error + DeleteLoggingExclusion(id string) error + + GetResourceType() string + + // Returns the unique resource identifier. 
+ GetResourceId() string + + // Textual description of this resource to be used in error message. + // The description should include the unique resource identifier. + DescribeResource() string +} + +type newResourceLoggingExclusionUpdaterFunc func(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) + +// loggingExclusionResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. +var loggingExclusionResourceTypes = []string{ + "billingAccounts", + "folders", + "organizations", + "projects", +} + +// LoggingExclusionId represents the parts that make up the canonical id used within terraform for a logging resource. +type LoggingExclusionId struct { + resourceType string + ResourceId string + name string +} + +// loggingExclusionIdRegex matches valid logging exclusion canonical ids +var loggingExclusionIdRegex = regexp.MustCompile("(.+)/(.+)/exclusions/(.+)") + +// canonicalId returns the LoggingExclusionId as the canonical id used within terraform. +func (l LoggingExclusionId) canonicalId() string { + return fmt.Sprintf("%s/%s/exclusions/%s", l.resourceType, l.ResourceId, l.name) +} + +// parent returns the "parent-level" resource that the exclusion is in (e.g. `folders/foo` for id `folders/foo/exclusions/bar`) +func (l LoggingExclusionId) parent() string { + return fmt.Sprintf("%s/%s", l.resourceType, l.ResourceId) +} + +// ParseLoggingExclusionId parses a canonical id into a LoggingExclusionId, or returns an error on failure. +func ParseLoggingExclusionId(id string) (*LoggingExclusionId, error) { + parts := loggingExclusionIdRegex.FindStringSubmatch(id) + if parts == nil { + return nil, fmt.Errorf("unable to parse logging exclusion id %#v", id) + } + // If our resourceType is not a valid logging exclusion resource type, complain loudly + validLoggingExclusionResourceType := false + for _, v := range loggingExclusionResourceTypes { + if v == parts[1] { + validLoggingExclusionResourceType = true + break + } + } + + if !validLoggingExclusionResourceType { + return nil, fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], + loggingExclusionResourceTypes) + } + return &LoggingExclusionId{ + resourceType: parts[1], + ResourceId: parts[2], + name: parts[3], + }, nil +} diff --git a/google/resource_logging_folder_bucket_config.go b/google/services/logging/resource_logging_folder_bucket_config.go similarity index 98% rename from google/resource_logging_folder_bucket_config.go rename to google/services/logging/resource_logging_folder_bucket_config.go index f3405f89a1f..9ab225303bd 100644 --- a/google/resource_logging_folder_bucket_config.go +++ b/google/services/logging/resource_logging_folder_bucket_config.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_folder_sink.go b/google/services/logging/resource_logging_folder_sink.go similarity index 99% rename from google/resource_logging_folder_sink.go rename to google/services/logging/resource_logging_folder_sink.go index 714f22532e0..8df1066da24 100644 --- a/google/resource_logging_folder_sink.go +++ b/google/services/logging/resource_logging_folder_sink.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
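The ResourceLoggingExclusionUpdater contract defined above is what each of the four parent-specific updaters earlier in this patch satisfies. To make the shape of the contract easy to see in isolation, here is a hypothetical in-memory implementation; fakeExclusionUpdater is illustrative only and not part of the patch:

package logging

import (
	"fmt"

	"google.golang.org/api/logging/v2"
)

// fakeExclusionUpdater stores exclusions in a map instead of calling the
// Cloud Logging API; it exists only to show the shape of the contract.
type fakeExclusionUpdater struct {
	exclusions map[string]*logging.LogExclusion
}

func (u *fakeExclusionUpdater) CreateLoggingExclusion(parent string, e *logging.LogExclusion) error {
	u.exclusions[parent+"/exclusions/"+e.Name] = e
	return nil
}

func (u *fakeExclusionUpdater) ReadLoggingExclusion(id string) (*logging.LogExclusion, error) {
	e, ok := u.exclusions[id]
	if !ok {
		return nil, fmt.Errorf("exclusion %q not found", id)
	}
	return e, nil
}

func (u *fakeExclusionUpdater) UpdateLoggingExclusion(id string, e *logging.LogExclusion, updateMask string) error {
	if _, ok := u.exclusions[id]; !ok {
		return fmt.Errorf("exclusion %q not found", id)
	}
	// The real updaters forward updateMask to the API; this fake
	// simply replaces the stored record.
	u.exclusions[id] = e
	return nil
}

func (u *fakeExclusionUpdater) DeleteLoggingExclusion(id string) error {
	delete(u.exclusions, id)
	return nil
}

func (u *fakeExclusionUpdater) GetResourceType() string { return "projects" }
func (u *fakeExclusionUpdater) GetResourceId() string   { return "fake-project" }
func (u *fakeExclusionUpdater) DescribeResource() string {
	return fmt.Sprintf("%q %q", u.GetResourceType(), u.GetResourceId())
}

// Compile-time check that the fake satisfies the interface.
var _ ResourceLoggingExclusionUpdater = (*fakeExclusionUpdater)(nil)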
// SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_organization_bucket_config.go b/google/services/logging/resource_logging_organization_bucket_config.go similarity index 98% rename from google/resource_logging_organization_bucket_config.go rename to google/services/logging/resource_logging_organization_bucket_config.go index dfa8b233cc3..a8d338f884d 100644 --- a/google/resource_logging_organization_bucket_config.go +++ b/google/services/logging/resource_logging_organization_bucket_config.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_organization_sink.go b/google/services/logging/resource_logging_organization_sink.go similarity index 99% rename from google/resource_logging_organization_sink.go rename to google/services/logging/resource_logging_organization_sink.go index 877d7721e31..3c7129793f3 100644 --- a/google/resource_logging_organization_sink.go +++ b/google/services/logging/resource_logging_organization_sink.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_project_bucket_config.go b/google/services/logging/resource_logging_project_bucket_config.go similarity index 99% rename from google/resource_logging_project_bucket_config.go rename to google/services/logging/resource_logging_project_bucket_config.go index b58f5811ea4..be3e53319b6 100644 --- a/google/resource_logging_project_bucket_config.go +++ b/google/services/logging/resource_logging_project_bucket_config.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "fmt" diff --git a/google/resource_logging_project_sink.go b/google/services/logging/resource_logging_project_sink.go similarity index 99% rename from google/resource_logging_project_sink.go rename to google/services/logging/resource_logging_project_sink.go index 45b51ab8967..18391398364 100644 --- a/google/resource_logging_project_sink.go +++ b/google/services/logging/resource_logging_project_sink.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package logging import ( "context" diff --git a/google/services/logging/resource_logging_project_sink_internal_test.go b/google/services/logging/resource_logging_project_sink_internal_test.go new file mode 100644 index 00000000000..6ff61edb5c8 --- /dev/null +++ b/google/services/logging/resource_logging_project_sink_internal_test.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+package logging
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+)
+
+func TestLoggingProjectSink_bigqueryOptionCustomizedDiff(t *testing.T) {
+	t.Parallel()
+
+	type LoggingProjectSink struct {
+		BigqueryOptions      int
+		UniqueWriterIdentity bool
+	}
+	cases := map[string]struct {
+		ExpectedError bool
+		After         LoggingProjectSink
+	}{
+		"no bigquery options with false unique writer identity": {
+			ExpectedError: false,
+			After: LoggingProjectSink{
+				BigqueryOptions:      0,
+				UniqueWriterIdentity: false,
+			},
+		},
+		"no bigquery options with true unique writer identity": {
+			ExpectedError: false,
+			After: LoggingProjectSink{
+				BigqueryOptions:      0,
+				UniqueWriterIdentity: true,
+			},
+		},
+		"bigquery options with false unique writer identity": {
+			ExpectedError: true,
+			After: LoggingProjectSink{
+				BigqueryOptions:      1,
+				UniqueWriterIdentity: false,
+			},
+		},
+		"bigquery options with true unique writer identity": {
+			ExpectedError: false,
+			After: LoggingProjectSink{
+				BigqueryOptions:      1,
+				UniqueWriterIdentity: true,
+			},
+		},
+	}
+
+	for tn, tc := range cases {
+		d := &tpgresource.ResourceDiffMock{
+			After: map[string]interface{}{
+				"bigquery_options.#":     tc.After.BigqueryOptions,
+				"unique_writer_identity": tc.After.UniqueWriterIdentity,
+			},
+		}
+		err := resourceLoggingProjectSinkCustomizeDiffFunc(d)
+		hasError := err != nil
+		if tc.ExpectedError != hasError {
+			t.Errorf("%v: expected error presence to be %v, but got %v", tn, tc.ExpectedError, hasError)
+		}
+	}
+}
diff --git a/google/resource_logging_sink.go b/google/services/logging/resource_logging_sink.go
similarity index 99%
rename from google/resource_logging_sink.go
rename to google/services/logging/resource_logging_sink.go
index 5c028bf7f94..39bee93d433 100644
--- a/google/resource_logging_sink.go
+++ b/google/services/logging/resource_logging_sink.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0
-package google
+package logging
 
 import (
 	"fmt"
@@ -253,7 +253,7 @@ func flattenLoggingSinkExclusion(exclusions []*logging.LogExclusion) []map[strin
 func resourceLoggingSinkImportState(sinkType string) schema.StateFunc {
 	return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
-		loggingSinkId, err := parseLoggingSinkId(d.Id())
+		loggingSinkId, err := ParseLoggingSinkId(d.Id())
 		if err != nil {
 			return nil, err
 		}
diff --git a/google/data_source_google_monitoring_uptime_check_ips.go b/google/services/monitoring/data_source_google_monitoring_uptime_check_ips.go
similarity index 99%
rename from google/data_source_google_monitoring_uptime_check_ips.go
rename to google/services/monitoring/data_source_google_monitoring_uptime_check_ips.go
index 92bf6ff7f34..ca4808c1f97 100644
--- a/google/data_source_google_monitoring_uptime_check_ips.go
+++ b/google/services/monitoring/data_source_google_monitoring_uptime_check_ips.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0
-package google
+package monitoring
 
 import (
 	"fmt"
diff --git a/google/resource_monitoring_dashboard.go b/google/services/monitoring/resource_monitoring_dashboard.go
similarity index 99%
rename from google/resource_monitoring_dashboard.go
rename to google/services/monitoring/resource_monitoring_dashboard.go
index b288d51b983..1f0eca31c92 100644
--- a/google/resource_monitoring_dashboard.go
+++ b/google/services/monitoring/resource_monitoring_dashboard.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0 -package google +package monitoring import ( "fmt" diff --git a/google/data_source_secret_manager_secret_version.go b/google/services/secretmanager/data_source_secret_manager_secret_version.go similarity index 99% rename from google/data_source_secret_manager_secret_version.go rename to google/services/secretmanager/data_source_secret_manager_secret_version.go index e439657ac46..d099bcb74b2 100644 --- a/google/data_source_secret_manager_secret_version.go +++ b/google/services/secretmanager/data_source_secret_manager_secret_version.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package secretmanager import ( "encoding/base64" diff --git a/google/data_source_secret_manager_secret_version_access.go b/google/services/secretmanager/data_source_secret_manager_secret_version_access.go similarity index 99% rename from google/data_source_secret_manager_secret_version_access.go rename to google/services/secretmanager/data_source_secret_manager_secret_version_access.go index 54972b641ef..230d2956086 100644 --- a/google/data_source_secret_manager_secret_version_access.go +++ b/google/services/secretmanager/data_source_secret_manager_secret_version_access.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package secretmanager import ( "encoding/base64" diff --git a/google/resource_endpoints_service.go b/google/services/servicemanagement/resource_endpoints_service.go similarity index 99% rename from google/resource_endpoints_service.go rename to google/services/servicemanagement/resource_endpoints_service.go index a7a207093ed..6c75318c2a8 100644 --- a/google/resource_endpoints_service.go +++ b/google/services/servicemanagement/resource_endpoints_service.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package servicemanagement import ( "context" diff --git a/google/services/servicemanagement/resource_endpoints_service_internal_test.go b/google/services/servicemanagement/resource_endpoints_service_internal_test.go new file mode 100644 index 00000000000..8c459215bd5 --- /dev/null +++ b/google/services/servicemanagement/resource_endpoints_service_internal_test.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package servicemanagement + +import ( + "reflect" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestEndpointsService_grpcMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + ExpectedAttributes map[string]string + Meta interface{} + }{ + "update from protoc_output to protoc_output_base64": { + StateVersion: 0, + Attributes: map[string]string{ + "protoc_output": "123456789", + "name": "testcase", + }, + ExpectedAttributes: map[string]string{ + "protoc_output_base64": "MTIzNDU2Nzg5", + "protoc_output": "", + "name": "testcase", + }, + Meta: &transport_tpg.Config{Project: "gcp-project", Region: "us-central1"}, + }, + "update from non-protoc_output": { + StateVersion: 0, + Attributes: map[string]string{ + "openapi_config": "foo bar baz", + "name": "testcase-2", + }, + ExpectedAttributes: map[string]string{ + "openapi_config": "foo bar baz", + "name": "testcase-2", + }, + Meta: &transport_tpg.Config{Project: "gcp-project", Region: "us-central1"}, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.Attributes["name"], + Attributes: tc.Attributes, + } + + is, err := migrateEndpointsService(tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if !reflect.DeepEqual(is.Attributes, tc.ExpectedAttributes) { + t.Fatalf("Attributes should be `%s` but are `%s`", tc.ExpectedAttributes, is.Attributes) + } + } +} diff --git a/google/resource_endpoints_service_migration.go b/google/services/servicemanagement/resource_endpoints_service_migration.go similarity index 96% rename from google/resource_endpoints_service_migration.go rename to google/services/servicemanagement/resource_endpoints_service_migration.go index e8aed2c6578..876da6a5904 100644 --- a/google/resource_endpoints_service_migration.go +++ b/google/services/servicemanagement/resource_endpoints_service_migration.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package servicemanagement import ( "encoding/base64" diff --git a/google/data_source_google_sql_ca_certs.go b/google/services/sql/data_source_google_sql_ca_certs.go similarity index 99% rename from google/data_source_google_sql_ca_certs.go rename to google/services/sql/data_source_google_sql_ca_certs.go index cc9ea2cdd16..52be56bd7ae 100644 --- a/google/data_source_google_sql_ca_certs.go +++ b/google/services/sql/data_source_google_sql_ca_certs.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/data_source_google_sql_tiers.go b/google/services/sql/data_source_google_sql_tiers.go similarity index 99% rename from google/data_source_google_sql_tiers.go rename to google/services/sql/data_source_google_sql_tiers.go index edebf6e324a..6a324202f6b 100644 --- a/google/data_source_google_sql_tiers.go +++ b/google/services/sql/data_source_google_sql_tiers.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
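In the first migration case above, the expected protoc_output_base64 value is simply the standard base64 encoding of the raw protoc_output string, as a one-line check confirms (illustrative only):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Matches the ExpectedAttributes value in the test case above.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("123456789"))) // MTIzNDU2Nzg5
}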
// SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/data_source_sql_backup_run.go b/google/services/sql/data_source_sql_backup_run.go similarity index 99% rename from google/data_source_sql_backup_run.go rename to google/services/sql/data_source_sql_backup_run.go index 16e3349a077..df9aed4c1d1 100644 --- a/google/data_source_sql_backup_run.go +++ b/google/services/sql/data_source_sql_backup_run.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/data_source_sql_database_instance.go b/google/services/sql/data_source_sql_database_instance.go similarity index 97% rename from google/data_source_sql_database_instance.go rename to google/services/sql/data_source_sql_database_instance.go index 0e9d0422153..81e2d15fdd8 100644 --- a/google/data_source_sql_database_instance.go +++ b/google/services/sql/data_source_sql_database_instance.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/google/data_source_sql_database_instances.go b/google/services/sql/data_source_sql_database_instances.go similarity index 99% rename from google/data_source_sql_database_instances.go rename to google/services/sql/data_source_sql_database_instances.go index 54eefe09d45..55c04619994 100644 --- a/google/data_source_sql_database_instances.go +++ b/google/services/sql/data_source_sql_database_instances.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/resource_sql_database_instance.go b/google/services/sql/resource_sql_database_instance.go similarity index 99% rename from google/resource_sql_database_instance.go rename to google/services/sql/resource_sql_database_instance.go index b71abd2b397..a0419ae1d18 100644 --- a/google/resource_sql_database_instance.go +++ b/google/services/sql/resource_sql_database_instance.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "context" diff --git a/google/services/sql/resource_sql_database_instance_internal_test.go b/google/services/sql/resource_sql_database_instance_internal_test.go new file mode 100644 index 00000000000..0293069cac7 --- /dev/null +++ b/google/services/sql/resource_sql_database_instance_internal_test.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "testing" +) + +func TestMaintenanceVersionDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ShouldSuppress bool + }{ + "older configuration maintenance version than current version should suppress diff": { + Old: "MYSQL_8_0_26.R20220508.01_09", + New: "MYSQL_5_7_37.R20210508.01_03", + ShouldSuppress: true, + }, + "older configuration maintenance version than current version should suppress diff with lexicographically smaller database version": { + Old: "MYSQL_5_8_10.R20220508.01_09", + New: "MYSQL_5_8_7.R20210508.01_03", + ShouldSuppress: true, + }, + "newer configuration maintenance version than current version should not suppress diff": { + Old: "MYSQL_5_7_37.R20210508.01_03", + New: "MYSQL_8_0_26.R20220508.01_09", + ShouldSuppress: false, + }, + } + + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + if maintenanceVersionDiffSuppress("version", tc.Old, tc.New, nil) != tc.ShouldSuppress { + t.Fatalf("%q => %q expect DiffSuppress to return %t", tc.Old, tc.New, tc.ShouldSuppress) + } + }) + } +} diff --git a/google/services/sql/resource_sql_database_internal_test.go b/google/services/sql/resource_sql_database_internal_test.go new file mode 100644 index 00000000000..2ae54b9c0dc --- /dev/null +++ b/google/services/sql/resource_sql_database_internal_test.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "testing" +) + +func TestCaseDiffDashSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "PD_HDD": { + Old: "PD_HDD", + New: "pd-hdd", + ExpectDiffSuppress: true, + }, + "PD_SSD": { + Old: "PD_SSD", + New: "pd-ssd", + ExpectDiffSuppress: true, + }, + "pd-hdd": { + Old: "pd-hdd", + New: "PD_HDD", + ExpectDiffSuppress: false, + }, + "pd-ssd": { + Old: "pd-ssd", + New: "PD_SSD", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if caseDiffDashSuppress(tn, tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Errorf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} diff --git a/google/resource_sql_ssl_cert.go b/google/services/sql/resource_sql_ssl_cert.go similarity index 99% rename from google/resource_sql_ssl_cert.go rename to google/services/sql/resource_sql_ssl_cert.go index b70df407504..91abc16d8b8 100644 --- a/google/resource_sql_ssl_cert.go +++ b/google/services/sql/resource_sql_ssl_cert.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/resource_sql_user.go b/google/services/sql/resource_sql_user.go similarity index 99% rename from google/resource_sql_user.go rename to google/services/sql/resource_sql_user.go index f91c862474d..6ca23f91e61 100644 --- a/google/resource_sql_user.go +++ b/google/services/sql/resource_sql_user.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/resource_sql_user_migrate.go b/google/services/sql/resource_sql_user_migrate.go similarity index 98% rename from google/resource_sql_user_migrate.go rename to google/services/sql/resource_sql_user_migrate.go index 80bc4b8e38c..814c9f406ba 100644 --- a/google/resource_sql_user_migrate.go +++ b/google/services/sql/resource_sql_user_migrate.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "fmt" diff --git a/google/resource_sql_user_migrate_test.go b/google/services/sql/resource_sql_user_migrate_test.go similarity index 99% rename from google/resource_sql_user_migrate_test.go rename to google/services/sql/resource_sql_user_migrate_test.go index 6c83552851c..2586be647ee 100644 --- a/google/resource_sql_user_migrate_test.go +++ b/google/services/sql/resource_sql_user_migrate_test.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package sql import ( "testing" diff --git a/google/data_source_google_storage_bucket.go b/google/services/storage/data_source_google_storage_bucket.go similarity index 98% rename from google/data_source_google_storage_bucket.go rename to google/services/storage/data_source_google_storage_bucket.go index 572640f3976..33a46a3555d 100644 --- a/google/data_source_google_storage_bucket.go +++ b/google/services/storage/data_source_google_storage_bucket.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "log" diff --git a/google/data_source_google_storage_bucket_object.go b/google/services/storage/data_source_google_storage_bucket_object.go similarity index 99% rename from google/data_source_google_storage_bucket_object.go rename to google/services/storage/data_source_google_storage_bucket_object.go index db8ad216a35..3858ec91d24 100644 --- a/google/data_source_google_storage_bucket_object.go +++ b/google/services/storage/data_source_google_storage_bucket_object.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "fmt" diff --git a/google/data_source_google_storage_project_service_account.go b/google/services/storage/data_source_google_storage_project_service_account.go similarity index 99% rename from google/data_source_google_storage_project_service_account.go rename to google/services/storage/data_source_google_storage_project_service_account.go index 70df8608da3..eac2ae55f08 100644 --- a/google/data_source_google_storage_project_service_account.go +++ b/google/services/storage/data_source_google_storage_project_service_account.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "fmt" diff --git a/google/data_source_storage_bucket_object_content.go b/google/services/storage/data_source_storage_bucket_object_content.go similarity index 99% rename from google/data_source_storage_bucket_object_content.go rename to google/services/storage/data_source_storage_bucket_object_content.go index ee5403a4806..c562d32f23a 100644 --- a/google/data_source_storage_bucket_object_content.go +++ b/google/services/storage/data_source_storage_bucket_object_content.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "fmt" diff --git a/google/data_source_storage_object_signed_url.go b/google/services/storage/data_source_storage_object_signed_url.go similarity index 99% rename from google/data_source_storage_object_signed_url.go rename to google/services/storage/data_source_storage_object_signed_url.go index 7f6f1f9d78c..b7a3ca87d49 100644 --- a/google/data_source_storage_object_signed_url.go +++ b/google/services/storage/data_source_storage_object_signed_url.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "bytes" diff --git a/google/services/storage/data_source_storage_object_signed_url_internal_test.go b/google/services/storage/data_source_storage_object_signed_url_internal_test.go new file mode 100644 index 00000000000..a8ad47db65e --- /dev/null +++ b/google/services/storage/data_source_storage_object_signed_url_internal_test.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "testing" + + "bytes" + "encoding/base64" + "net/url" + + "golang.org/x/oauth2/google" +) + +const fakeCredentials = `{ + "type": "service_account", + "project_id": "gcp-project", + "private_key_id": "29a54056cee3d6886d9e8515a959af538ab5add9", + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAsGHDAdHZfi81LgVeeMHXYLgNDpcFYhoBykYtTDdNyA5AixID\n8JdKlCmZ6qLNnZrbs4JlBJfmzw6rjUC5bVBFg5NwYVBu3+3Msa4rgLsTGsjPH9rt\nC+QFnFhcmzg3zz8eeXBqJdhw7wmn1Xa9SsC3h6YWveBk98ecyE7yGe8J8xGphjk7\nEQ/KBmRK/EJD0ZwuYW1W4Bv5f5fca7qvi9rCprEmL8//uy0qCwoJj2jU3zc5p72M\npkSZb1XlYxxTEo/h9WCEvWS9pGhy6fJ0sA2RsBHqU4Y5O7MJEei9yu5fVSZUi05f\n/ggfUID+cFEq0Z/A98whKPEBBJ/STdEaqEEkBwIDAQABAoIBAED6EsvF0dihbXbh\ntXbI+h4AT5cTXYFRUV2B0sgkC3xqe65/2YG1Sl0gojoE9bhcxxjvLWWuy/F1Vw93\nS5gQnTsmgpzm86F8yg6euhn3UMdqOJtknDToMITzLFJmOHEZsJFOL1x3ysrUhMan\nsn4qVrIbJn+WfbumBoToSFnzbHflacOh06ZRbYa2bpSPMfGGFtwqQjRadn5+pync\nlCjaupcg209sM0qEk/BDSzHvWL1VgLMdiKBx574TSwS0o569+7vPNt92Ydi7kARo\nreOzkkF4L3xNhKZnmls2eGH6A8cp1KZXoMLFuO+IwvBMA0O29LsUlKJU4PjBrf+7\nwaslnMECgYEA5bJv0L6DKZQD3RCBLue4/mDg0GHZqAhJBS6IcaXeaWeH6PgGZggV\nMGkWnULltJIYFwtaueTfjWqciAeocKx+rqoRjuDMOGgcrEf6Y+b5AqF+IjQM66Ll\nIYPUt3FCIc69z5LNEtyP4DSWsFPJ5UhAoG4QRlDTqT5q0gKHFjeLdeECgYEAxJRk\nkrsWmdmUs5NH9pyhTdEDIc59EuJ8iOqOLzU8xUw6/s2GSClopEFJeeEoIWhLuPY3\nX3bFt4ppl/ksLh05thRs4wXRxqhnokjD3IcGu3l6Gb5QZTYwb0VfN+q2tWVEE8Qc\nPQURheUsM2aP/gpJVQvNsWVmkT0Ijc3J8bR2hucCgYEAjOF4e0ueHu5NwFTTJvWx\nHTRGLwkU+l66ipcT0MCvPW7miRk2s3XZqSuLV0Ekqi/A3sF0D/g0tQPipfwsb48c\n0/wzcLKoDyCsFW7AQG315IswVcIe+peaeYfl++1XZmzrNlkPtrXY+ObIVbXOavZ5\nzOw0xyvj5jYGRnCOci33N4ECgYA91EKx2ABq0YGw3aEj0u31MMlgZ7b1KqFq2wNv\nm7oKgEiJ/hC/P673AsXefNAHeetfOKn/77aOXQ2LTEb2FiEhwNjiquDpL+ywoVxh\nT2LxsmqSEEbvHpUrWlFxn/Rpp3k7ElKjaqWxTHyTii2+BHQ+OKEwq6kQA3deSpy6\n1jz1fwKBgQDLqbdq5FA63PWqApfNVykXukg9MASIcg/0fjADFaHTPDvJjhFutxRP\nppI5Q95P12CQ/eRBZKJnRlkhkL8tfPaWPzzOpCTjID7avRhx2oLmstmYuXx0HluE\ncqXLbAV9WDpIJ3Bpa/S8tWujWhLDmixn2JeAdurWS+naH9U9e4I6Rw==\n-----END RSA PRIVATE KEY-----\n", + "client_email": "user@gcp-project.iam.gserviceaccount.com", + "client_id": "103198861025845558729", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40gcp-project.iam.gserviceaccount.com" +} +` + +// The following values are derived from the output of the `gsutil signurl` command. +// i.e. 
+// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file
+// URL	HTTP Method	Expiration	Signed URL
+// gs://tf-test-bucket-6159205297736845881/path/to/file	GET	2016-08-12 14:03:30	https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D
+
+const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file"
+const testUrlExpires = 1470967410
+const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D"
+const testUrlExpectedUrl = "https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D"
+
+func TestUrlData_Signing(t *testing.T) {
+	urlData := &UrlData{
+		HttpMethod: "GET",
+		Expires:    testUrlExpires,
+		Path:       testUrlPath,
+	}
+	// unescape and decode the expected signature
+	expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded)
+	if err != nil {
+		t.Error(err)
+	}
+	expected, err := base64.StdEncoding.DecodeString(expectedSig)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// load fake service account credentials
+	cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "")
+	if err != nil {
+		t.Error(err)
+	}
+
+	// create url data signature
+	toSign := urlData.SigningString()
+	result, err := SignString(toSign, cfg)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// compare to expected value
+	if !bytes.Equal(result, expected) {
+		t.Errorf("Signatures do not match:\n%x\n%x\n", expected, result)
+	}
+
+}
+
+func TestUrlData_SignedUrl(t *testing.T) {
+	// load fake service account credentials
+	cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "")
+	if err != nil {
+		t.Error(err)
+	}
+
+	urlData := &UrlData{
+		HttpMethod: "GET",
+		Expires:    testUrlExpires,
+		Path:       testUrlPath,
+		JwtConfig:  cfg,
+	}
+	result, err := urlData.SignedUrl()
+	if err != nil {
+		t.Errorf("Could not generate signed url: %+v", err)
+	}
+	if result != testUrlExpectedUrl {
+		t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result)
+	}
+}
diff --git a/google/resource_storage_bucket.go b/google/services/storage/resource_storage_bucket.go
similarity index 99%
rename from google/resource_storage_bucket.go
rename to google/services/storage/resource_storage_bucket.go
index 68099bfe8a7..247bc765d80 100644
--- a/google/resource_storage_bucket.go
+++ b/google/services/storage/resource_storage_bucket.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "bytes" @@ -489,7 +489,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Get the bucket and location bucket := d.Get("name").(string) - if err := checkGCSName(bucket); err != nil { + if err := tpgresource.CheckGCSName(bucket); err != nil { return err } location := d.Get("location").(string) diff --git a/google/services/storage/resource_storage_bucket_acl.go b/google/services/storage/resource_storage_bucket_acl.go new file mode 100644 index 00000000000..7d14651a639 --- /dev/null +++ b/google/services/storage/resource_storage_bucket_acl.go @@ -0,0 +1,403 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func ResourceStorageBucketAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketAclCreate, + Read: resourceStorageBucketAclRead, + Update: resourceStorageBucketAclUpdate, + Delete: resourceStorageBucketAclDelete, + CustomizeDiff: resourceStorageRoleEntityCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the bucket it applies to.`, + }, + + "default_acl": { + Type: schema.TypeString, + Optional: true, + Description: `Configure this ACL to be the default ACL.`, + }, + + "predefined_acl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"role_entity"}, + Description: `The canned GCS ACL to apply. Must be set if role_entity is not.`, + }, + + "role_entity": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"predefined_acl"}, + Description: `List of role/entity pairs in the form ROLE:entity. See GCS Bucket ACL documentation for more details. 
Must be set if predefined_acl is not.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageRoleEntityCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + keys := diff.GetChangedKeysPrefix("role_entity") + if len(keys) < 1 { + return nil + } + count := diff.Get("role_entity.#").(int) + if count < 1 { + return nil + } + state := map[string]struct{}{} + conf := map[string]struct{}{} + for i := 0; i < count; i++ { + old, new := diff.GetChange(fmt.Sprintf("role_entity.%d", i)) + state[old.(string)] = struct{}{} + conf[new.(string)] = struct{}{} + } + if len(state) != len(conf) { + return nil + } + for k := range state { + if _, ok := conf[k]; !ok { + // project-owners- is explicitly stripped from the roles that this + // resource will delete + if strings.Contains(k, "OWNER:project-owners-") { + continue + } + return nil + } + } + return diff.Clear("role_entity") +} + +type RoleEntity struct { + Role string + Entity string +} + +func getBucketAclId(bucket string) string { + return bucket + "-acl" +} + +func GetRoleEntityPair(role_entity string) (*RoleEntity, error) { + split := strings.Split(role_entity, ":") + if len(split) != 2 { + return nil, fmt.Errorf("Error, each role entity pair must be " + + "formatted as ROLE:entity") + } + + return &RoleEntity{Role: split[0], Entity: split[1]}, nil +} + +func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + bucket := d.Get("bucket").(string) + predefined_acl := "" + default_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if v, ok := d.GetOk("default_acl"); ok { + default_acl = v.(string) + } + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + if len(predefined_acl) > 0 { + res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + } + + if len(role_entity) > 0 { + current, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() + if err != nil { + return fmt.Errorf("Error retrieving current ACLs: %s", err) + } + for _, v := range role_entity { + pair, err := GetRoleEntityPair(v.(string)) + if err != nil { + return err + } + var alreadyInserted bool + for _, cur := range current.Items { + if cur.Entity == pair.Entity && cur.Role == pair.Role { + alreadyInserted = true + break + } + } + if alreadyInserted { + log.Printf("[DEBUG]: pair %s-%s already exists, not trying to insert again\n", pair.Role, pair.Entity) + continue + } + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) + + _, err = config.NewStorageClient(userAgent).BucketAccessControls.Insert(bucket, bucketAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket 
%s: %v", bucket, err) + } + } + + } + + if len(default_acl) > 0 { + res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + } + + d.SetId(getBucketAclId(bucket)) + return resourceStorageBucketAclRead(d, meta) +} + +func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + bucket := d.Get("bucket").(string) + + // The API offers no way to retrieve predefined ACLs, + // and we can't tell which access controls were created + // by the predefined roles, so... + // + // This is, needless to say, a bad state of affairs and + // should be fixed. + if _, ok := d.GetOk("role_entity"); ok { + res, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() + + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) + } + entities := make([]string, 0, len(res.Items)) + for _, item := range res.Items { + entities = append(entities, item.Role+":"+item.Entity) + } + + if err := d.Set("role_entity", entities); err != nil { + return fmt.Errorf("Error setting role_entity: %s", err) + } + } else { + // if we don't set `role_entity` to nil (effectively setting it + // to empty in Terraform state), because it's computed now, + // Terraform will think it's missing from state, is supposed + // to be there, and throw up a diff for role_entity.#. So it + // must always be set in state. 
+		if err := d.Set("role_entity", nil); err != nil {
+			return fmt.Errorf("Error setting role_entity: %s", err)
+		}
+	}
+
+	return nil
+}
+
+func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	bucket := d.Get("bucket").(string)
+
+	lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}")
+	if err != nil {
+		return err
+	}
+	transport_tpg.MutexStore.Lock(lockName)
+	defer transport_tpg.MutexStore.Unlock(lockName)
+
+	if d.HasChange("role_entity") {
+		bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do()
+		if err != nil {
+			return fmt.Errorf("Error reading bucket %q: %v", bucket, err)
+		}
+
+		project := strconv.FormatUint(bkt.ProjectNumber, 10)
+		o, n := d.GetChange("role_entity")
+		old_re, new_re := o.([]interface{}), n.([]interface{})
+
+		old_re_map := make(map[string]string)
+		for _, v := range old_re {
+			res, err := GetRoleEntityPair(v.(string))
+
+			if err != nil {
+				return fmt.Errorf(
+					"Old state has malformed Role/Entity pair: %v", err)
+			}
+
+			old_re_map[res.Entity] = res.Role
+		}
+
+		for _, v := range new_re {
+			pair, err := GetRoleEntityPair(v.(string))
+			if err != nil {
+				return fmt.Errorf(
+					"New state has malformed Role/Entity pair: %v", err)
+			}
+
+			bucketAccessControl := &storage.BucketAccessControl{
+				Role:   pair.Role,
+				Entity: pair.Entity,
+			}
+
+			// If the old state entity's role doesn't match the new one, it needs to be inserted
+			if old_re_map[pair.Entity] != bucketAccessControl.Role {
+				_, err = config.NewStorageClient(userAgent).BucketAccessControls.Insert(
+					bucket, bucketAccessControl).Do()
+			}
+
+			// Now we only store the keys that have to be removed
+			delete(old_re_map, pair.Entity)
+
+			if err != nil {
+				return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err)
+			}
+		}
+
+		for entity, role := range old_re_map {
+			if entity == fmt.Sprintf("project-owners-%s", project) && role == "OWNER" {
+				log.Printf("[WARN]: Skipping %s-%s; not deleting owner ACL.", role, entity)
+				continue
+			}
+			log.Printf("[DEBUG]: removing entity %s", entity)
+			err := config.NewStorageClient(userAgent).BucketAccessControls.Delete(bucket, entity).Do()
+
+			if err != nil {
+				return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err)
+			}
+		}
+
+		return resourceStorageBucketAclRead(d, meta)
+	}
+
+	if d.HasChange("default_acl") {
+		default_acl := d.Get("default_acl").(string)
+
+		res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do()
+
+		if err != nil {
+			return fmt.Errorf("Error reading bucket %s: %v", bucket, err)
+		}
+
+		_, err = config.NewStorageClient(userAgent).Buckets.Update(bucket,
+			res).PredefinedDefaultObjectAcl(default_acl).Do()
+
+		if err != nil {
+			return fmt.Errorf("Error updating bucket %s: %v", bucket, err)
+		}
+
+		return resourceStorageBucketAclRead(d, meta)
+	}
+
+	return nil
+}
+
+func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	bucket := d.Get("bucket").(string)
+
+	lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}")
+	if err != nil {
+		return err
+	}
+	transport_tpg.MutexStore.Lock(lockName)
+	defer transport_tpg.MutexStore.Unlock(lockName)
+
+	bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do()
+	if err != nil {
+		return fmt.Errorf("Error retrieving bucket %q: %v",
bucket, err) + } + project := strconv.FormatUint(bkt.ProjectNumber, 10) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range re_local { + res, err := GetRoleEntityPair(v.(string)) + if err != nil { + return err + } + + if res.Entity == fmt.Sprintf("project-owners-%s", project) && res.Role == "OWNER" { + log.Printf("[WARN]: Skipping %s-%s; not deleting owner ACL.", res.Role, res.Entity) + continue + } + + log.Printf("[DEBUG]: removing entity %s", res.Entity) + + err = config.NewStorageClient(userAgent).BucketAccessControls.Delete(bucket, res.Entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) + } + } + + return nil +} diff --git a/google/services/storage/resource_storage_bucket_internal_test.go b/google/services/storage/resource_storage_bucket_internal_test.go new file mode 100644 index 00000000000..607b9c67375 --- /dev/null +++ b/google/services/storage/resource_storage_bucket_internal_test.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "testing" +) + +func TestLabelDiffSuppress(t *testing.T) { + cases := map[string]struct { + K, Old, New string + ExpectDiffSuppress bool + }{ + "missing goog-dataplex-asset-id": { + K: "labels.goog-dataplex-asset-id", + Old: "test-bucket", + New: "", + ExpectDiffSuppress: true, + }, + "explicit goog-dataplex-asset-id": { + K: "labels.goog-dataplex-asset-id", + Old: "test-bucket", + New: "test-bucket-1", + ExpectDiffSuppress: false, + }, + "missing goog-dataplex-lake-id": { + K: "labels.goog-dataplex-lake-id", + Old: "test-lake", + New: "", + ExpectDiffSuppress: true, + }, + "explicit goog-dataplex-lake-id": { + K: "labels.goog-dataplex-lake-id", + Old: "test-lake", + New: "test-lake-1", + ExpectDiffSuppress: false, + }, + "missing goog-dataplex-project-id": { + K: "labels.goog-dataplex-project-id", + Old: "test-project-12345", + New: "", + ExpectDiffSuppress: true, + }, + "explicit goog-dataplex-project-id": { + K: "labels.goog-dataplex-project-id", + Old: "test-project-12345", + New: "test-project-12345-1", + ExpectDiffSuppress: false, + }, + "missing goog-dataplex-zone-id": { + K: "labels.goog-dataplex-zone-id", + Old: "test-zone1", + New: "", + ExpectDiffSuppress: true, + }, + "explicit goog-dataplex-zone-id": { + K: "labels.goog-dataplex-zone-id", + Old: "test-zone1", + New: "test-zone1-1", + ExpectDiffSuppress: false, + }, + "labels.%": { + K: "labels.%", + Old: "5", + New: "1", + ExpectDiffSuppress: true, + }, + "deleted custom key": { + K: "labels.my-label", + Old: "my-value", + New: "", + ExpectDiffSuppress: false, + }, + "added custom key": { + K: "labels.my-label", + Old: "", + New: "my-value", + ExpectDiffSuppress: false, + }, + } + for tn, tc := range cases { + if resourceDataplexLabelDiffSuppress(tc.K, tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Errorf("bad: %s, %q: %q => %q expect DiffSuppress to return %t", tn, tc.K, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} diff --git a/google/resource_storage_bucket_object.go b/google/services/storage/resource_storage_bucket_object.go similarity index 99% rename from google/resource_storage_bucket_object.go rename to google/services/storage/resource_storage_bucket_object.go index 06d1ef738fd..8c815b79e12 100644 --- a/google/resource_storage_bucket_object.go +++ b/google/services/storage/resource_storage_bucket_object.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "bytes" diff --git a/google/resource_storage_default_object_acl.go b/google/services/storage/resource_storage_default_object_acl.go similarity index 99% rename from google/resource_storage_default_object_acl.go rename to google/services/storage/resource_storage_default_object_acl.go index fb5bfc5ec69..b47237427ae 100644 --- a/google/resource_storage_default_object_acl.go +++ b/google/services/storage/resource_storage_default_object_acl.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package storage import ( "fmt" diff --git a/google/services/storage/resource_storage_notification.go b/google/services/storage/resource_storage_notification.go new file mode 100644 index 00000000000..93c869956cd --- /dev/null +++ b/google/services/storage/resource_storage_notification.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/services/pubsub" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/storage/v1" +) + +func ResourceStorageNotification() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageNotificationCreate, + Read: resourceStorageNotificationRead, + Delete: resourceStorageNotificationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the bucket.`, + }, + + "payload_format": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"JSON_API_V1", "NONE"}, false), + Description: `The desired content of the Payload. One of "JSON_API_V1" or "NONE".`, + }, + + "topic": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Cloud Pub/Sub topic to which this subscription publishes. Expects either the topic name, assumed to belong to the default GCP provider project, or the project-level name, i.e. projects/my-gcp-project/topics/my-topic or my-topic. If the project is not set in the provider, you will need to use the project-level name.`, + }, + + "custom_attributes": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: ` A set of key/value attribute pairs to attach to each Cloud Pub/Sub message published for this notification subscription`, + }, + + "event_types": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"}, + false), + }, + Description: `List of event type filters for this notification config. If not specified, Cloud Storage will send notifications for all event types. 
The valid types are: "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"`, + }, + + "object_name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a prefix path filter for this notification config. Cloud Storage will only send notifications for objects in this bucket whose names begin with the specified prefix.`, + }, + + "notification_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the created notification.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageNotificationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + bucket := d.Get("bucket").(string) + + topicName := d.Get("topic").(string) + computedTopicName := pubsub.GetComputedTopicName("", topicName) + if computedTopicName != topicName { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + computedTopicName = pubsub.GetComputedTopicName(project, topicName) + } + + storageNotification := &storage.Notification{ + CustomAttributes: tpgresource.ExpandStringMap(d, "custom_attributes"), + EventTypes: tpgresource.ConvertStringSet(d.Get("event_types").(*schema.Set)), + ObjectNamePrefix: d.Get("object_name_prefix").(string), + PayloadFormat: d.Get("payload_format").(string), + Topic: computedTopicName, + } + + res, err := config.NewStorageClient(userAgent).Notifications.Insert(bucket, storageNotification).Do() + if err != nil { + return fmt.Errorf("Error creating notification config for bucket %s: %v", bucket, err) + } + + d.SetId(fmt.Sprintf("%s/notificationConfigs/%s", bucket, res.Id)) + + return resourceStorageNotificationRead(d, meta) +} + +func resourceStorageNotificationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + bucket, notificationID := ResourceStorageNotificationParseID(d.Id()) + + res, err := config.NewStorageClient(userAgent).Notifications.Get(bucket, notificationID).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Notification configuration %s for bucket %s", notificationID, bucket)) + } + + if err := d.Set("bucket", bucket); err != nil { + return fmt.Errorf("Error setting bucket: %s", err) + } + if err := d.Set("payload_format", res.PayloadFormat); err != nil { + return fmt.Errorf("Error setting payload_format: %s", err) + } + if err := d.Set("topic", res.Topic); err != nil { + return fmt.Errorf("Error setting topic: %s", err) + } + if err := d.Set("object_name_prefix", res.ObjectNamePrefix); err != nil { + return fmt.Errorf("Error setting object_name_prefix: %s", err) + } + if err := d.Set("event_types", res.EventTypes); err != nil { + return fmt.Errorf("Error setting event_types: %s", err) + } + if err := d.Set("notification_id", notificationID); err != nil { + return fmt.Errorf("Error setting notification_id: %s", err) + } + if err := d.Set("self_link", res.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("custom_attributes", res.CustomAttributes); err != nil { + return fmt.Errorf("Error setting custom_attributes: %s", err) + } + + return 
nil
+}
+
+func resourceStorageNotificationDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	bucket, notificationID := ResourceStorageNotificationParseID(d.Id())
+
+	err = config.NewStorageClient(userAgent).Notifications.Delete(bucket, notificationID).Do()
+	if err != nil {
+		return fmt.Errorf("Error deleting notification configuration %s for bucket %s: %v", notificationID, bucket, err)
+	}
+
+	return nil
+}
+
+func ResourceStorageNotificationParseID(id string) (string, string) {
+	// The ID has the form {bucket}/notificationConfigs/{notification_id}
+	parts := strings.Split(id, "/")
+
+	return parts[0], parts[2]
+}
diff --git a/google/resource_storage_object_acl.go b/google/services/storage/resource_storage_object_acl.go
similarity index 99%
rename from google/resource_storage_object_acl.go
rename to google/services/storage/resource_storage_object_acl.go
index 57d301eadcd..d0a8a543f26 100644
--- a/google/resource_storage_object_acl.go
+++ b/google/services/storage/resource_storage_object_acl.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0
-package google
+package storage
 
 import (
 	"context"
diff --git a/google/data_source_google_storage_transfer_project_service_account.go b/google/services/storagetransfer/data_source_google_storage_transfer_project_service_account.go
similarity index 98%
rename from google/data_source_google_storage_transfer_project_service_account.go
rename to google/services/storagetransfer/data_source_google_storage_transfer_project_service_account.go
index 9b8127c5a78..2961665fb26 100644
--- a/google/data_source_google_storage_transfer_project_service_account.go
+++ b/google/services/storagetransfer/data_source_google_storage_transfer_project_service_account.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0
-package google
+package storagetransfer
 
 import (
 	"fmt"
diff --git a/google/resource_storage_transfer_job.go b/google/services/storagetransfer/resource_storage_transfer_job.go
similarity index 99%
rename from google/resource_storage_transfer_job.go
rename to google/services/storagetransfer/resource_storage_transfer_job.go
index cfc99647907..088f73be8b7 100644
--- a/google/resource_storage_transfer_job.go
+++ b/google/services/storagetransfer/resource_storage_transfer_job.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0
-package google
+package storagetransfer
 
 import (
 	"fmt"
diff --git a/google/data_source_tags_tag_key.go b/google/services/tags/data_source_tags_tag_key.go
similarity index 99%
rename from google/data_source_tags_tag_key.go
rename to google/services/tags/data_source_tags_tag_key.go
index 7bad34621a2..61498e2193e 100644
--- a/google/data_source_tags_tag_key.go
+++ b/google/services/tags/data_source_tags_tag_key.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0
-package google
+package tags
 
 import (
 	"errors"
diff --git a/google/data_source_tags_tag_value.go b/google/services/tags/data_source_tags_tag_value.go
similarity index 99%
rename from google/data_source_tags_tag_value.go
rename to google/services/tags/data_source_tags_tag_value.go
index 2bbe86d75b4..134ffa4bc16 100644
--- a/google/data_source_tags_tag_value.go
+++ b/google/services/tags/data_source_tags_tag_value.go
@@ -1,6 +1,6 @@
 // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0 -package google +package tags import ( "errors" diff --git a/google/resource_tags_location_tag_bindings.go b/google/services/tags/resource_tags_location_tag_bindings.go similarity index 99% rename from google/resource_tags_location_tag_bindings.go rename to google/services/tags/resource_tags_location_tag_bindings.go index a2bdfb1224d..88ab9ae1f96 100644 --- a/google/resource_tags_location_tag_bindings.go +++ b/google/services/tags/resource_tags_location_tag_bindings.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package tags import ( "fmt" diff --git a/google/data_source_tpu_tensorflow_versions.go b/google/services/tpu/data_source_tpu_tensorflow_versions.go similarity index 99% rename from google/data_source_tpu_tensorflow_versions.go rename to google/services/tpu/data_source_tpu_tensorflow_versions.go index d2d98dcfaa0..25a23b064b9 100644 --- a/google/data_source_tpu_tensorflow_versions.go +++ b/google/services/tpu/data_source_tpu_tensorflow_versions.go @@ -1,6 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package google +package tpu import ( "fmt"
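With the files above now declaring per-service packages instead of package google, code outside those packages reaches the moved resources through the new import paths and the exported constructors. Below is a minimal sketch of that wiring in Go; the function name resourceMapSketch is illustrative only (the provider's actual registration lives in provider.go, which is not shown here), while the import path and the two constructors are taken from the hunks above.

package google

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/hashicorp/terraform-provider-google/google/services/storage"
)

// resourceMapSketch maps Terraform resource type names to the constructors
// that are now exported from the storage service package, replacing the bare
// identifiers that were previously visible inside package google.
func resourceMapSketch() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		"google_storage_bucket_acl":   storage.ResourceStorageBucketAcl(),
		"google_storage_notification": storage.ResourceStorageNotification(),
	}
}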