From b2a6ce3ba9ea27d491ad809aa0ea3a1f0530878d Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Thu, 2 Jan 2020 17:06:49 -0500 Subject: [PATCH 01/19] Use more markdown for Bug --- .github/ISSUE_TEMPLATE/bug.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index d7f4812647c..d108137d5d2 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -10,9 +10,9 @@ about: For when something is there, but doesn't work how it should. ### Community Note * Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request -* Please do not leave "+1" or "me too" comments, they generate extra noise for issue followers and do not help prioritize the request +* Please do not leave _+1_ or _me too_ comments, they generate extra noise for issue followers and do not help prioritize the request * If you are interested in working on this issue or have submitted a pull request, please leave a comment -* If an issue is assigned to the "modular-magician" user, it is either in the process of being autogenerated, or is planned to be autogenerated soon. If an issue is assigned to a user, that user is claiming responsibility for the issue. If an issue is assigned to "hashibot", a community member has claimed the issue already. +* If an issue is assigned to the `modular-magician` user, it is either in the process of being autogenerated, or is planned to be autogenerated soon. If an issue is assigned to a user, that user is claiming responsibility for the issue. If an issue is assigned to `hashibot`, a community member has claimed the issue already. 
From 440f5948c2eda42469a0e88f284e62ad8920d7a4 Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Thu, 2 Jan 2020 17:08:07 -0500 Subject: [PATCH 02/19] Consistently use sentences for each bullet --- .github/ISSUE_TEMPLATE/bug.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index d108137d5d2..6b912991f93 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -9,9 +9,9 @@ about: For when something is there, but doesn't work how it should. ### Community Note -* Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request -* Please do not leave _+1_ or _me too_ comments, they generate extra noise for issue followers and do not help prioritize the request -* If you are interested in working on this issue or have submitted a pull request, please leave a comment +* Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request. +* Please do not leave _+1_ or _me too_ comments, they generate extra noise for issue followers and do not help prioritize the request. +* If you are interested in working on this issue or have submitted a pull request, please leave a comment. * If an issue is assigned to the `modular-magician` user, it is either in the process of being autogenerated, or is planned to be autogenerated soon. If an issue is assigned to a user, that user is claiming responsibility for the issue. If an issue is assigned to `hashibot`, a community member has claimed the issue already. 
From 045b52d7e1e58fa11e51225d99172b1cd33280ff Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Thu, 2 Jan 2020 17:10:05 -0500 Subject: [PATCH 03/19] Rewrite bug reproduction block --- .github/ISSUE_TEMPLATE/bug.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index 6b912991f93..be64d3fd830 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -31,12 +31,16 @@ about: For when something is there, but doesn't work how it should. ```tf -# Copy-paste your Terraform configurations here - for large Terraform configs, -# please use a service like Dropbox and share a link to the ZIP file. For -# security, you can also encrypt the files using our GPG public key: https://www.hashicorp.com/security +# Copy-paste your Terraform configurations here. +# +# For large Terraform configs, please use a service like Dropbox and share a link to the ZIP file. +# For security, you can also encrypt the files using our GPG public key: +# https://www.hashicorp.com/security +# # If reproducing the bug involves modifying the config file (e.g., apply a config, -# change a value, apply the config again, see the bug) then please include both the -# version of the config before the change, and the version of the config after the change. +# change a value, apply the config again, see the bug), then please include both: +# * the version of the config before the change, and +# * the version of the config after the change. 
``` ### Debug Output From 41d06c25b6acdab9e4dc74ca4913133721396ea1 Mon Sep 17 00:00:00 2001 From: Chris Stephens Date: Mon, 6 Jan 2020 23:08:11 +0000 Subject: [PATCH 04/19] Allow domain mapping to succeed if DNS is pending Signed-off-by: Modular Magician --- google/cloudrun_polling.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/google/cloudrun_polling.go b/google/cloudrun_polling.go index 4750183d410..04fb129df5b 100644 --- a/google/cloudrun_polling.go +++ b/google/cloudrun_polling.go @@ -59,6 +59,11 @@ func (s KnativeStatus) LatestMessage() string { func (s KnativeStatus) State(res interface{}) string { for _, condition := range s.Status.Conditions { if condition.Type == "Ready" { + // DomainMapping can enter a 'terminal' state of waiting for external verification + // of DNS records. + if condition.Reason == "CertificatePending" { + return "Ready:CertificatePending" + } return fmt.Sprintf("%s:%s", condition.Type, condition.Status) } } @@ -76,7 +81,7 @@ func (p *CloudRunPolling) PendingStates() []string { return []string{"Ready:Unknown", "Empty"} } func (p *CloudRunPolling) TargetStates() []string { - return []string{"Ready:True"} + return []string{"Ready:True", "Ready:CertificatePending"} } func (p *CloudRunPolling) ErrorStates() []string { return []string{"Ready:False"} From 0bf2ad17933c5b8b610b1b26644d89c605d9296c Mon Sep 17 00:00:00 2001 From: Petar Marinkovic <13387474+marinkovicpetar@users.noreply.github.com> Date: Tue, 7 Jan 2020 00:54:46 +0100 Subject: [PATCH 05/19] Updated google_folder.html (#4149) * Updated google_folder.html The page in the first example shows that you should use organization_id with value of 1234567. In the Import example, it's not clear whether organization_id is user, or folder_id is used. 
API call behind this import command is only accepting folder_id (can be checked when setting TF_LOG to trace and viewing the API call) * Update website/docs/r/google_folder.html.markdown Co-Authored-By: Dana Hoffman Co-authored-by: Dana Hoffman --- website/docs/r/google_folder.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/google_folder.html.markdown b/website/docs/r/google_folder.html.markdown index 6f90117df58..bde78e4d042 100644 --- a/website/docs/r/google_folder.html.markdown +++ b/website/docs/r/google_folder.html.markdown @@ -61,7 +61,7 @@ exported: ## Import -Folders can be imported using the folder autogenerated `name`, e.g. +Folders can be imported using the folder's id, e.g. ``` # Both syntaxes are valid From a0521bd7d4a7d81aa4ab6c77a5809c419aaaf0d5 Mon Sep 17 00:00:00 2001 From: The Magician Date: Mon, 6 Jan 2020 16:05:42 -0800 Subject: [PATCH 06/19] add google_kms_secret_ciphertext resource, deprecate datasource (#5314) Signed-off-by: Modular Magician Co-authored-by: Dana Hoffman --- ...ata_source_google_kms_secret_ciphertext.go | 3 +- ...ource_google_kms_secret_ciphertext_test.go | 82 +-------- google/provider.go | 5 +- google/resource_kms_secret_ciphertext.go | 165 ++++++++++++++++++ google/resource_kms_secret_ciphertext_test.go | 82 +++++++++ ...google_kms_secret_ciphertext.html.markdown | 2 + ...ackend_bucket_signed_url_key.html.markdown | 13 -- ...ckend_service_signed_url_key.html.markdown | 13 -- .../r/kms_secret_ciphertext.html.markdown | 126 +++++++++++++ website/google.erb | 3 + 10 files changed, 388 insertions(+), 106 deletions(-) create mode 100644 google/resource_kms_secret_ciphertext.go create mode 100644 google/resource_kms_secret_ciphertext_test.go create mode 100644 website/docs/r/kms_secret_ciphertext.html.markdown diff --git a/google/data_source_google_kms_secret_ciphertext.go b/google/data_source_google_kms_secret_ciphertext.go index 1a78e450bf5..2be6daca165 100644 --- 
a/google/data_source_google_kms_secret_ciphertext.go +++ b/google/data_source_google_kms_secret_ciphertext.go @@ -13,7 +13,8 @@ import ( func dataSourceGoogleKmsSecretCiphertext() *schema.Resource { return &schema.Resource{ - Read: dataSourceGoogleKmsSecretCiphertextRead, + DeprecationMessage: "Use the google_kms_secret_ciphertext resource instead.", + Read: dataSourceGoogleKmsSecretCiphertextRead, Schema: map[string]*schema.Schema{ "crypto_key": { Type: schema.TypeString, diff --git a/google/data_source_google_kms_secret_ciphertext_test.go b/google/data_source_google_kms_secret_ciphertext_test.go index 16675d09066..f13409161fa 100644 --- a/google/data_source_google_kms_secret_ciphertext_test.go +++ b/google/data_source_google_kms_secret_ciphertext_test.go @@ -1,113 +1,41 @@ package google import ( - "encoding/base64" "fmt" - "log" "testing" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" - "google.golang.org/api/cloudkms/v1" ) -func TestAccKmsSecretCiphertext_basic(t *testing.T) { +func TestAccDataKmsSecretCiphertext_basic(t *testing.T) { t.Parallel() - projectOrg := getTestOrgFromEnv(t) - projectBillingAccount := getTestBillingAccountFromEnv(t) - - projectId := "terraform-" + acctest.RandString(10) - keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + kms := BootstrapKMSKey(t) plaintext := fmt.Sprintf("secret-%s", acctest.RandString(10)) - // The first test creates resources needed to encrypt plaintext and produce ciphertext resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testGoogleKmsCryptoKey_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + Config: testGoogleKmsSecretCiphertext_datasource(kms.CryptoKey.Name, plaintext), Check: 
func(s *terraform.State) error { - cryptoKeyId, err := getCryptoKeyId(s, "google_kms_crypto_key.crypto_key") + plaintext, err := testAccDecryptSecretDataWithCryptoKey(s, kms.CryptoKey.Name, "data.google_kms_secret_ciphertext.acceptance") if err != nil { return err } - // The second test asserts that the data source created a ciphertext that can be decrypted to the correct plaintext - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testGoogleKmsSecretCiphertext_datasource(cryptoKeyId.terraformId(), plaintext), - Check: func(s *terraform.State) error { - plaintext, err := testAccDecryptSecretDataWithCryptoKey(s, cryptoKeyId, "data.google_kms_secret_ciphertext.acceptance") - - if err != nil { - return err - } - - return resource.TestCheckResourceAttr("data.google_kms_secret_ciphertext.acceptance", "plaintext", plaintext)(s) - }, - }, - }, - }) - - return nil + return resource.TestCheckResourceAttr("data.google_kms_secret_ciphertext.acceptance", "plaintext", plaintext)(s) }, }, }, }) } -func getCryptoKeyId(s *terraform.State, cryptoKeyResourceName string) (*kmsCryptoKeyId, error) { - config := testAccProvider.Meta().(*Config) - rs, ok := s.RootModule().Resources[cryptoKeyResourceName] - if !ok { - return nil, fmt.Errorf("Resource not found: %s", cryptoKeyResourceName) - } - - return parseKmsCryptoKeyId(rs.Primary.Attributes["id"], config) -} - -func testAccDecryptSecretDataWithCryptoKey(s *terraform.State, cryptoKeyId *kmsCryptoKeyId, secretCiphertextResourceName string) (string, error) { - config := testAccProvider.Meta().(*Config) - rs, ok := s.RootModule().Resources[secretCiphertextResourceName] - if !ok { - return "", fmt.Errorf("Resource not found: %s", secretCiphertextResourceName) - } - ciphertext, ok := rs.Primary.Attributes["ciphertext"] - if !ok { - return "", fmt.Errorf("Attribute 'ciphertext' not found in resource '%s'", secretCiphertextResourceName) - 
} - - kmsDecryptRequest := &cloudkms.DecryptRequest{ - Ciphertext: ciphertext, - } - - decryptResponse, err := config.clientKms.Projects.Locations.KeyRings.CryptoKeys.Decrypt(cryptoKeyId.cryptoKeyId(), kmsDecryptRequest).Do() - - if err != nil { - return "", fmt.Errorf("Error decrypting ciphertext: %s", err) - } - - plaintextBytes, err := base64.StdEncoding.DecodeString(decryptResponse.Plaintext) - - if err != nil { - return "", err - } - - plaintext := string(plaintextBytes) - log.Printf("[INFO] Successfully decrypted ciphertext and got plaintext: %s", plaintext) - - return plaintext, nil -} - func testGoogleKmsSecretCiphertext_datasource(cryptoKeyTerraformId, plaintext string) string { return fmt.Sprintf(` data "google_kms_secret_ciphertext" "acceptance" { diff --git a/google/provider.go b/google/provider.go index 2a2bb55fd96..b95f77c44f2 100644 --- a/google/provider.go +++ b/google/provider.go @@ -477,9 +477,9 @@ func Provider() terraform.ResourceProvider { return provider } -// Generated resources: 95 +// Generated resources: 96 // Generated IAM resources: 45 -// Total generated resources: 140 +// Total generated resources: 141 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -597,6 +597,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_identity_platform_tenant": resourceIdentityPlatformTenant(), "google_kms_key_ring": resourceKMSKeyRing(), "google_kms_crypto_key": resourceKMSCryptoKey(), + "google_kms_secret_ciphertext": resourceKMSSecretCiphertext(), "google_logging_metric": resourceLoggingMetric(), "google_ml_engine_model": resourceMLEngineModel(), "google_monitoring_alert_policy": resourceMonitoringAlertPolicy(), diff --git a/google/resource_kms_secret_ciphertext.go b/google/resource_kms_secret_ciphertext.go new file mode 100644 index 00000000000..712c216fb7e --- /dev/null +++ b/google/resource_kms_secret_ciphertext.go @@ -0,0 +1,165 @@ +// 
---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "encoding/base64" + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceKMSSecretCiphertext() *schema.Resource { + return &schema.Resource{ + Create: resourceKMSSecretCiphertextCreate, + Read: resourceKMSSecretCiphertextRead, + Delete: resourceKMSSecretCiphertextDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full name of the CryptoKey that will be used to encrypt the provided plaintext. 
+Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}''`, + }, + "plaintext": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The plaintext to be encrypted.`, + Sensitive: true, + }, + "ciphertext": { + Type: schema.TypeString, + Computed: true, + Description: `Contains the result of encrypting the provided plaintext, encoded in base64.`, + }, + }, + } +} + +func resourceKMSSecretCiphertextCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := make(map[string]interface{}) + plaintextProp, err := expandKMSSecretCiphertextPlaintext(d.Get("plaintext"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("plaintext"); !isEmptyValue(reflect.ValueOf(plaintextProp)) && (ok || !reflect.DeepEqual(v, plaintextProp)) { + obj["plaintext"] = plaintextProp + } + + url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}:encrypt") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SecretCiphertext: %#v", obj) + var project string + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + project = parts[1] + } + res, err := sendRequestWithTimeout(config, "POST", project, url, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating SecretCiphertext: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating SecretCiphertext %q: %#v", d.Id(), res) + + // we don't set anything on read and instead do it all in create + ciphertext, ok := res["ciphertext"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + d.Set("ciphertext", ciphertext.(string)) + + id, err = replaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceKMSSecretCiphertextRead(d, meta) +} + +func resourceKMSSecretCiphertextRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") + if err != nil { + return err + } + + var project string + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + project = parts[1] + } + res, err := sendRequest(config, "GET", project, url, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("KMSSecretCiphertext %q", d.Id())) + } + + res, err = resourceKMSSecretCiphertextDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing KMSSecretCiphertext because it no longer exists.") + d.SetId("") + return nil + } + + return nil +} + +func resourceKMSSecretCiphertextDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] KMS SecretCiphertext resources"+ + " cannot be deleted from GCP. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on the server.", d.Id()) + d.SetId("") + + return nil +} + +func expandKMSSecretCiphertextPlaintext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil +} + +func resourceKMSSecretCiphertextDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + return res, nil +} diff --git a/google/resource_kms_secret_ciphertext_test.go b/google/resource_kms_secret_ciphertext_test.go new file mode 100644 index 00000000000..03400ca461f --- /dev/null +++ b/google/resource_kms_secret_ciphertext_test.go @@ -0,0 +1,82 @@ +package google + +import ( + "encoding/base64" + "fmt" + "log" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "google.golang.org/api/cloudkms/v1" +) + +func TestAccKmsSecretCiphertext_basic(t *testing.T) { + t.Parallel() + + kms := BootstrapKMSKey(t) + + plaintext := fmt.Sprintf("secret-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testGoogleKmsSecretCiphertext(kms.CryptoKey.Name, plaintext), + Check: func(s *terraform.State) error { + plaintext, err := testAccDecryptSecretDataWithCryptoKey(s, kms.CryptoKey.Name, "google_kms_secret_ciphertext.acceptance") + + if err != nil { + return err + } + + return resource.TestCheckResourceAttr("google_kms_secret_ciphertext.acceptance", "plaintext", plaintext)(s) + }, + }, + }, + }) +} + +func testAccDecryptSecretDataWithCryptoKey(s *terraform.State, cryptoKeyId string, secretCiphertextResourceName string) (string, error) { + config := testAccProvider.Meta().(*Config) + 
rs, ok := s.RootModule().Resources[secretCiphertextResourceName] + if !ok { + return "", fmt.Errorf("Resource not found: %s", secretCiphertextResourceName) + } + ciphertext, ok := rs.Primary.Attributes["ciphertext"] + if !ok { + return "", fmt.Errorf("Attribute 'ciphertext' not found in resource '%s'", secretCiphertextResourceName) + } + + kmsDecryptRequest := &cloudkms.DecryptRequest{ + Ciphertext: ciphertext, + } + + decryptResponse, err := config.clientKms.Projects.Locations.KeyRings.CryptoKeys.Decrypt(cryptoKeyId, kmsDecryptRequest).Do() + + if err != nil { + return "", fmt.Errorf("Error decrypting ciphertext: %s", err) + } + + plaintextBytes, err := base64.StdEncoding.DecodeString(decryptResponse.Plaintext) + + if err != nil { + return "", err + } + + plaintext := string(plaintextBytes) + log.Printf("[INFO] Successfully decrypted ciphertext and got plaintext: %s", plaintext) + + return plaintext, nil +} + +func testGoogleKmsSecretCiphertext(cryptoKeyTerraformId, plaintext string) string { + return fmt.Sprintf(` +resource "google_kms_secret_ciphertext" "acceptance" { + crypto_key = "%s" + plaintext = "%s" +} +`, cryptoKeyTerraformId, plaintext) +} diff --git a/website/docs/d/google_kms_secret_ciphertext.html.markdown b/website/docs/d/google_kms_secret_ciphertext.html.markdown index cbfc407d53e..9e06d5c1057 100644 --- a/website/docs/d/google_kms_secret_ciphertext.html.markdown +++ b/website/docs/d/google_kms_secret_ciphertext.html.markdown @@ -9,6 +9,8 @@ description: |- # google\_kms\_secret\_ciphertext +!> **Warning:** This data source is deprecated. Use the [`google_kms_secret_ciphertext`](../../r/kms_secret_ciphertext.html) **resource** instead. + This data source allows you to encrypt data with Google Cloud KMS and use the ciphertext within your resource definitions. 
diff --git a/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown b/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown index 54a5ae20dd7..0ce1c4957e5 100644 --- a/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown +++ b/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown @@ -94,19 +94,6 @@ This resource provides the following - `create` - Default is 4 minutes. - `delete` - Default is 4 minutes. -## Import - -BackendBucketSignedUrlKey can be imported using any of these accepted formats: - -``` -$ terraform import google_compute_backend_bucket_signed_url_key.default projects/{{project}}/global/backendBuckets/{{backend_bucket}}/{{name}} -$ terraform import google_compute_backend_bucket_signed_url_key.default {{project}}/{{backend_bucket}}/{{name}} -$ terraform import google_compute_backend_bucket_signed_url_key.default {{backend_bucket}}/{{name}} -``` - --> If you're importing a resource with beta features, make sure to include `-provider=google-beta` -as an argument so that Terraform uses the correct provider to import your resource. - ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/compute_backend_service_signed_url_key.html.markdown b/website/docs/r/compute_backend_service_signed_url_key.html.markdown index d4946a1c70b..afd2809c04d 100644 --- a/website/docs/r/compute_backend_service_signed_url_key.html.markdown +++ b/website/docs/r/compute_backend_service_signed_url_key.html.markdown @@ -132,19 +132,6 @@ This resource provides the following - `create` - Default is 4 minutes. - `delete` - Default is 4 minutes. 
-## Import - -BackendServiceSignedUrlKey can be imported using any of these accepted formats: - -``` -$ terraform import google_compute_backend_service_signed_url_key.default projects/{{project}}/global/backendServices/{{backend_service}}/{{name}} -$ terraform import google_compute_backend_service_signed_url_key.default {{project}}/{{backend_service}}/{{name}} -$ terraform import google_compute_backend_service_signed_url_key.default {{backend_service}}/{{name}} -``` - --> If you're importing a resource with beta features, make sure to include `-provider=google-beta` -as an argument so that Terraform uses the correct provider to import your resource. - ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/kms_secret_ciphertext.html.markdown b/website/docs/r/kms_secret_ciphertext.html.markdown new file mode 100644 index 00000000000..4bb0c4365da --- /dev/null +++ b/website/docs/r/kms_secret_ciphertext.html.markdown @@ -0,0 +1,126 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud KMS" +layout: "google" +page_title: "Google: google_kms_secret_ciphertext" +sidebar_current: "docs-google-kms-secret-ciphertext" +description: |- + Encrypts secret data with Google Cloud KMS and provides access to the ciphertext. +--- + +# google\_kms\_secret\_ciphertext + +Encrypts secret data with Google Cloud KMS and provides access to the ciphertext. 
+ + +~> **NOTE**: Using this resource will allow you to conceal secret data within your +resource definitions, but it does not take care of protecting that data in the +logging output, plan output, or state output. Please take care to secure your secret +data outside of resource definitions. + + +To get more information about SecretCiphertext, see: + +* [API documentation](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt) +* How-to Guides + * [Encrypting and decrypting data with a symmetric key](https://cloud.google.com/kms/docs/encrypt-decrypt) + +## Example Usage - Kms Secret Ciphertext Basic + + +```hcl +resource "google_kms_key_ring" "keyring" { + name = "keyring-example" + location = "global" +} + +resource "google_kms_crypto_key" "cryptokey" { + name = "crypto-key-example" + key_ring = google_kms_key_ring.keyring.id + rotation_period = "100000s" + + lifecycle { + prevent_destroy = true + } +} + +resource "google_kms_secret_ciphertext" "my_password" { + crypto_key = google_kms_crypto_key.cryptokey.id + plaintext = "my-secret-password" +} + +resource "google_compute_instance" "instance" { + name = "my-instance" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + network_interface { + network = "default" + + access_config { + } + } + + metadata = { + password = google_kms_secret_ciphertext.my_password.ciphertext + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `plaintext` - + (Required) + The plaintext to be encrypted. + +* `crypto_key` - + (Required) + The full name of the CryptoKey that will be used to encrypt the provided plaintext. 
+ Format: `'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}'` + + +- - - + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + + +* `ciphertext` - + Contains the result of encrypting the provided plaintext, encoded in base64. + + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 4 minutes. +- `delete` - Default is 4 minutes. + +## User Project Overrides + +This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/google.erb b/website/google.erb index d7c556cb5b4..ed50c8727cf 100644 --- a/website/google.erb +++ b/website/google.erb @@ -865,6 +865,9 @@ > google_kms_key_ring_iam_policy + > + google_kms_secret_ciphertext + From 4d7c0a26ee2033087a7a3d8ff3fbcdffd140ab36 Mon Sep 17 00:00:00 2001 From: The Magician Date: Tue, 7 Jan 2020 09:24:39 -0800 Subject: [PATCH 07/19] Allow add/removing Bigtable clusters (#5318) Signed-off-by: Modular Magician Co-authored-by: Riley Karson --- google/resource_bigtable_instance.go | 146 +++++++++++++----- google/resource_bigtable_instance_test.go | 46 +++++- .../docs/r/bigtable_instance.html.markdown | 16 +- 3 files changed, 164 insertions(+), 44 deletions(-) diff --git a/google/resource_bigtable_instance.go b/google/resource_bigtable_instance.go index 5b22f30c832..3340a7099b6 100644 --- a/google/resource_bigtable_instance.go +++ b/google/resource_bigtable_instance.go @@ -44,12 +44,10 @@ func resourceBigtableInstance() *schema.Resource { "cluster_id": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "zone": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "num_nodes": { Type: schema.TypeInt, @@ -60,7 +58,6 @@ func resourceBigtableInstance() *schema.Resource { Type: 
schema.TypeString, Optional: true, Default: "SSD", - ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false), }, }, @@ -212,27 +209,28 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er } defer c.Close() - clusters, err := c.Clusters(ctx, d.Get("name").(string)) - if err != nil { - return fmt.Errorf("Error retrieving clusters for instance %s", err.Error()) + conf := &bigtable.InstanceWithClustersConfig{ + InstanceID: d.Get("name").(string), } - clusterMap := make(map[string]*bigtable.ClusterInfo, len(clusters)) - for _, cluster := range clusters { - clusterMap[cluster.Name] = cluster - } - - for _, cluster := range d.Get("cluster").([]interface{}) { - config := cluster.(map[string]interface{}) - cluster_id := config["cluster_id"].(string) - if cluster, ok := clusterMap[cluster_id]; ok { - if cluster.ServeNodes != config["num_nodes"].(int) { - err = c.UpdateCluster(ctx, d.Get("name").(string), cluster.Name, int32(config["num_nodes"].(int))) - if err != nil { - return fmt.Errorf("Error updating cluster %s for instance %s", cluster.Name, d.Get("name").(string)) - } - } - } + displayName, ok := d.GetOk("display_name") + if !ok { + displayName = conf.InstanceID + } + conf.DisplayName = displayName.(string) + + switch d.Get("instance_type").(string) { + case "DEVELOPMENT": + conf.InstanceType = bigtable.DEVELOPMENT + case "PRODUCTION": + conf.InstanceType = bigtable.PRODUCTION + } + + conf.Clusters = expandBigtableClusters(d.Get("cluster").([]interface{}), conf.InstanceID) + + _, err = bigtable.UpdateInstanceAndSyncClusters(ctx, c, conf) + if err != nil { + return fmt.Errorf("Error updating instance. 
%s", err) } return resourceBigtableInstanceRead(d, meta) @@ -305,6 +303,7 @@ func expandBigtableClusters(clusters []interface{}, instanceID string) []bigtabl return results } +// resourceBigtableInstanceValidateDevelopment validates restrictions specific to DEVELOPMENT clusters func resourceBigtableInstanceValidateDevelopment(diff *schema.ResourceDiff, meta interface{}) error { if diff.Get("instance_type").(string) != "DEVELOPMENT" { return nil @@ -318,46 +317,115 @@ func resourceBigtableInstanceValidateDevelopment(diff *schema.ResourceDiff, meta return nil } +// resourceBigtableInstanceClusterReorderTypeList causes the cluster block to +// act like a TypeSet while it's a TypeList underneath. It preserves state +// ordering on updates, and causes the resource to get recreated if it would +// attempt to perform an impossible change. +// This doesn't use the standard unordered list utility (https://github.com/GoogleCloudPlatform/magic-modules/blob/master/templates/terraform/unordered_list_customize_diff.erb) +// because some fields can't be modified using the API and we recreate the instance +// when they're changed. 
func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, meta interface{}) error { - old_count, new_count := diff.GetChange("cluster.#") + oldCount, newCount := diff.GetChange("cluster.#") // simulate Required:true, MinItems:1, MaxItems:4 for "cluster" - if new_count.(int) < 1 { + if newCount.(int) < 1 { return fmt.Errorf("config is invalid: Too few cluster blocks: Should have at least 1 \"cluster\" block") } - if new_count.(int) > 4 { + if newCount.(int) > 4 { return fmt.Errorf("config is invalid: Too many cluster blocks: No more than 4 \"cluster\" blocks are allowed") } - if old_count.(int) != new_count.(int) { + // exit early if we're in create (name's old value is nil) + n, _ := diff.GetChange("name") + if n == nil || n == "" { return nil } - var old_ids []string - clusters := make(map[string]interface{}, new_count.(int)) + oldIds := []string{} + clusters := make(map[string]interface{}, newCount.(int)) - for i := 0; i < new_count.(int); i++ { - old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) - if old_id != nil && old_id != "" { - old_ids = append(old_ids, old_id.(string)) + for i := 0; i < oldCount.(int); i++ { + oldId, _ := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) + if oldId != nil && oldId != "" { + oldIds = append(oldIds, oldId.(string)) } + } + log.Printf("[DEBUG] Saw old ids: %#v", oldIds) + + for i := 0; i < newCount.(int); i++ { + _, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) _, c := diff.GetChange(fmt.Sprintf("cluster.%d", i)) - clusters[new_id.(string)] = c + clusters[newId.(string)] = c + } + + // create a list of clusters using the old order when possible to minimise + // diffs + // initially, add matching clusters to their index by id (nil otherwise) + // then, fill in nils with new clusters. 
+ // [a, b, c, e] -> [c, a, d] becomes [a, nil, c] followed by [a, d, c] + var orderedClusters []interface{} + for i := 0; i < newCount.(int); i++ { + // when i is out of range of old, all values are nil + if i >= len(oldIds) { + orderedClusters = append(orderedClusters, nil) + continue + } + + oldId := oldIds[i] + if c, ok := clusters[oldId]; ok { + log.Printf("[DEBUG] Matched: %#v", oldId) + orderedClusters = append(orderedClusters, c) + delete(clusters, oldId) + } else { + orderedClusters = append(orderedClusters, nil) + } } - // reorder clusters according to the old cluster order - var old_cluster_order []interface{} - for _, id := range old_ids { - if c, ok := clusters[id]; ok { - old_cluster_order = append(old_cluster_order, c) + log.Printf("[DEBUG] Remaining clusters: %#v", clusters) + for _, elem := range clusters { + for i, e := range orderedClusters { + if e == nil { + orderedClusters[i] = elem + } } } - err := diff.SetNew("cluster", old_cluster_order) + err := diff.SetNew("cluster", orderedClusters) if err != nil { return fmt.Errorf("Error setting cluster diff: %s", err) } + // Clusters can't have their zone / storage_type updated, ForceNew if it's + // changed. This will show a diff with the old state on the left side and + // the unmodified new state on the right and the ForceNew attributed to the + // _old state index_ even if the diff appears to have moved. + // This depends on the clusters having been reordered already by the prior + // SetNew call. + // We've implemented it here because it doesn't return an error in the + // client and silently fails. 
+ for i := 0; i < newCount.(int); i++ { + oldId, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) + if oldId != newId { + continue + } + + oZone, nZone := diff.GetChange(fmt.Sprintf("cluster.%d.zone", i)) + if oZone != nZone { + err := diff.ForceNew(fmt.Sprintf("cluster.%d.zone", i)) + if err != nil { + return fmt.Errorf("Error setting cluster diff: %s", err) + } + } + + oST, nST := diff.GetChange(fmt.Sprintf("cluster.%d.storage_type", i)) + if oST != nST { + err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i)) + if err != nil { + return fmt.Errorf("Error setting cluster diff: %s", err) + } + } + } + return nil } diff --git a/google/resource_bigtable_instance_test.go b/google/resource_bigtable_instance_test.go index e03f962c279..efef6cfbfd1 100644 --- a/google/resource_bigtable_instance_test.go +++ b/google/resource_bigtable_instance_test.go @@ -68,7 +68,23 @@ func TestAccBigtableInstance_cluster(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccBigtableInstance_cluster_reordered(instanceName, 5), + Config: testAccBigtableInstance_clusterReordered(instanceName, 5), + }, + { + ResourceName: "google_bigtable_instance.instance", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBigtableInstance_clusterModified(instanceName, 5), + }, + { + ResourceName: "google_bigtable_instance.instance", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBigtableInstance_clusterReordered(instanceName, 5), }, { ResourceName: "google_bigtable_instance.instance", @@ -225,7 +241,7 @@ resource "google_bigtable_instance" "instance" { `, instanceName, instanceName, instanceName, instanceName, instanceName, instanceName) } -func testAccBigtableInstance_cluster_reordered(instanceName string, numNodes int) string { +func testAccBigtableInstance_clusterReordered(instanceName string, numNodes int) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { name = "%s" @@ -257,6 +273,32 @@ 
resource "google_bigtable_instance" "instance" { `, instanceName, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes) } +func testAccBigtableInstance_clusterModified(instanceName string, numNodes int) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + cluster { + cluster_id = "%s-c" + zone = "us-central1-c" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-a" + zone = "us-central1-a" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-b" + zone = "us-central1-b" + num_nodes = %d + storage_type = "HDD" + } +} +`, instanceName, instanceName, numNodes, instanceName, numNodes, instanceName, numNodes) +} + func testAccBigtableInstance_development(instanceName string) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { diff --git a/website/docs/r/bigtable_instance.html.markdown b/website/docs/r/bigtable_instance.html.markdown index 083db724b39..47205456ba7 100644 --- a/website/docs/r/bigtable_instance.html.markdown +++ b/website/docs/r/bigtable_instance.html.markdown @@ -68,11 +68,21 @@ The `cluster` block supports the following arguments: * `cluster_id` - (Required) The ID of the Cloud Bigtable cluster. -* `zone` - (Required) The zone to create the Cloud Bigtable cluster in. Each cluster must have a different zone in the same region. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations). +* `zone` - (Required) The zone to create the Cloud Bigtable cluster in. Each +cluster must have a different zone in the same region. Zones that support +Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations). -* `num_nodes` - (Optional) The number of nodes in your Cloud Bigtable cluster. Required, with a minimum of `3` for a `PRODUCTION` instance. 
Must be left unset for a `DEVELOPMENT` instance. +* `num_nodes` - (Optional) The number of nodes in your Cloud Bigtable cluster. +Required, with a minimum of `3` for a `PRODUCTION` instance. Must be left unset +for a `DEVELOPMENT` instance. -* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`. +* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or +`"HDD"`. Defaults to `"SSD"`. + +!> **Warning:** Modifying the `storage_type` or `zone` of an existing cluster (by +`cluster_id`) will cause Terraform to delete/recreate the entire +`google_bigtable_instance` resource. If these values are changing, use a new +`cluster_id`. ## Attributes Reference From f004d58dee6013cb72b3209e6e71b69684a3f751 Mon Sep 17 00:00:00 2001 From: The Magician Date: Tue, 7 Jan 2020 10:19:23 -0800 Subject: [PATCH 08/19] Add bootstrapped test networks for service networking tests (#5316) Signed-off-by: Modular Magician Co-authored-by: emily --- google/bootstrap_utils_test.go | 61 ++++++++++++ ...urce_service_networking_connection_test.go | 95 +++++-------------- 2 files changed, 85 insertions(+), 71 deletions(-) diff --git a/google/bootstrap_utils_test.go b/google/bootstrap_utils_test.go index fcab7f2e1f5..f654f55e4db 100644 --- a/google/bootstrap_utils_test.go +++ b/google/bootstrap_utils_test.go @@ -6,6 +6,7 @@ import ( "log" "os" "testing" + "time" "google.golang.org/api/cloudkms/v1" "google.golang.org/api/iam/v1" @@ -230,3 +231,63 @@ func BootstrapServiceAccount(t *testing.T, project, testRunner string) string { return sa.Email } + +const SharedTestNetworkPrefix = "tf-bootstrap-net-" + +// BootstrapSharedServiceNetworkingConsumerNetwork will return a shared compute network +// for service networking test to prevent hitting limits on tenancy projects. +// +// This will either return an existing network or create one if it hasn't been created +// in the project yet. 
One consumer network/tenant project we don't own is created +// per producer network (i.e. network created by test), with a hard limit set. +func BootstrapSharedServiceNetworkingConsumerNetwork(t *testing.T, testId string) string { + if v := os.Getenv("TF_ACC"); v == "" { + log.Println("Acceptance tests and bootstrapping skipped unless env 'TF_ACC' set") + // If not running acceptance tests, return an empty string + return "" + } + + project := getTestProjectFromEnv() + networkName := SharedTestNetworkPrefix + testId + config := &Config{ + Credentials: getTestCredsFromEnv(), + Project: project, + Region: getTestRegionFromEnv(), + Zone: getTestZoneFromEnv(), + } + ConfigureBasePaths(config) + if err := config.LoadAndValidate(context.Background()); err != nil { + t.Errorf("Unable to bootstrap network: %s", err) + } + + log.Printf("[DEBUG] Getting shared test network %q", networkName) + _, err := config.clientCompute.Networks.Get(project, networkName).Do() + if err != nil && isGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG] Network %q not found, bootstrapping", networkName) + url := fmt.Sprintf("%sprojects/%s/global/networks", config.ComputeBasePath, project) + netObj := map[string]interface{}{ + "name": networkName, + "autoCreateSubnetworks": false, + } + + res, err := sendRequestWithTimeout(config, "POST", project, url, netObj, 4*time.Minute) + if err != nil { + t.Fatalf("Error bootstrapping shared test network %q: %s", networkName, err) + } + + log.Printf("[DEBUG] Waiting for network creation to finish") + err = computeOperationWaitTime(config, res, project, "Error bootstrapping shared test network", 4) + if err != nil { + t.Fatalf("Error bootstrapping shared test network %q: %s", networkName, err) + } + } + + network, err := config.clientCompute.Networks.Get(project, networkName).Do() + if err != nil { + t.Errorf("Error getting shared test network %q: %s", networkName, err) + } + if network == nil { + t.Fatalf("Error getting shared test network %q: is 
nil", networkName) + } + return network.Name +} diff --git a/google/resource_service_networking_connection_test.go b/google/resource_service_networking_connection_test.go index 65263055da9..4932b7f7ef2 100644 --- a/google/resource_service_networking_connection_test.go +++ b/google/resource_service_networking_connection_test.go @@ -9,19 +9,20 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccServiceNetworkingConnectionCreate(t *testing.T) { +func TestAccServiceNetworkingConnection_create(t *testing.T) { t.Parallel() + network := BootstrapSharedServiceNetworkingConsumerNetwork(t, "service-networking-connection-create") + addr := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + service := "servicenetworking.googleapis.com" + resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testServiceNetworkingConnectionDestroy(service, network), Steps: []resource.TestStep{ { - Config: testAccServiceNetworkingConnection( - fmt.Sprintf("tf-test-%s", acctest.RandString(10)), - fmt.Sprintf("tf-test-%s", acctest.RandString(10)), - "servicenetworking.googleapis.com", - ), + Config: testAccServiceNetworkingConnection(network, addr, "servicenetworking.googleapis.com"), }, { ResourceName: "google_service_networking_connection.foobar", @@ -32,49 +33,21 @@ func TestAccServiceNetworkingConnectionCreate(t *testing.T) { }) } -// Standard checkDestroy cannot be used here because destroying the network will delete -// all the networking connections so this would return false positives. 
-func TestAccServiceNetworkingConnectionDestroy(t *testing.T) { +func TestAccServiceNetworkingConnection_update(t *testing.T) { t.Parallel() - network := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - addressRange := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccServiceNetworkingConnection( - network, - addressRange, - "servicenetworking.googleapis.com", - ), - }, - { - Config: testAccServiceNetworkingConnectionDestroy(network, addressRange), - Check: resource.ComposeTestCheckFunc( - testServiceNetworkingConnectionDestroy("servicenetworking.googleapis.com", network, getTestProjectFromEnv()), - ), - }, - }, - }) -} - -func TestAccServiceNetworkingConnectionUpdate(t *testing.T) { - t.Parallel() + network := BootstrapSharedServiceNetworkingConsumerNetwork(t, "service-networking-connection-update") + addr1 := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + addr2 := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + service := "servicenetworking.googleapis.com" - network := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testServiceNetworkingConnectionDestroy(service, network), Steps: []resource.TestStep{ { - Config: testAccServiceNetworkingConnection( - network, - fmt.Sprintf("tf-test-%s", acctest.RandString(10)), - "servicenetworking.googleapis.com", - ), + Config: testAccServiceNetworkingConnection(network, addr1, "servicenetworking.googleapis.com"), }, { ResourceName: "google_service_networking_connection.foobar", @@ -82,11 +55,7 @@ func TestAccServiceNetworkingConnectionUpdate(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccServiceNetworkingConnection( - network, - 
fmt.Sprintf("tf-test-%s", acctest.RandString(10)), - "servicenetworking.googleapis.com", - ), + Config: testAccServiceNetworkingConnection(network, addr2, "servicenetworking.googleapis.com"), }, { ResourceName: "google_service_networking_connection.foobar", @@ -98,11 +67,11 @@ func TestAccServiceNetworkingConnectionUpdate(t *testing.T) { } -func testServiceNetworkingConnectionDestroy(parent, network, project string) resource.TestCheckFunc { +func testServiceNetworkingConnectionDestroy(parent, network string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) parentService := "services/" + parent - networkName := fmt.Sprintf("projects/%s/global/networks/%s", project, network) + networkName := fmt.Sprintf("projects/%s/global/networks/%s", getTestProjectFromEnv(), network) response, err := config.clientServiceNetworking.Services.Connections.List(parentService). Network(networkName).Do() @@ -122,7 +91,7 @@ func testServiceNetworkingConnectionDestroy(parent, network, project string) res func testAccServiceNetworkingConnection(networkName, addressRangeName, serviceName string) string { return fmt.Sprintf(` -resource "google_compute_network" "foobar" { +data "google_compute_network" "servicenet" { name = "%s" } @@ -131,29 +100,13 @@ resource "google_compute_global_address" "foobar" { purpose = "VPC_PEERING" address_type = "INTERNAL" prefix_length = 16 - network = google_compute_network.foobar.self_link + network = data.google_compute_network.servicenet.self_link } resource "google_service_networking_connection" "foobar" { - network = google_compute_network.foobar.self_link + network = data.google_compute_network.servicenet.self_link service = "%s" reserved_peering_ranges = [google_compute_global_address.foobar.name] } `, networkName, addressRangeName, serviceName) } - -func testAccServiceNetworkingConnectionDestroy(networkName, addressRangeName string) string { - return fmt.Sprintf(` -resource 
"google_compute_network" "foobar" { - name = "%s" -} - -resource "google_compute_global_address" "foobar" { - name = "%s" - purpose = "VPC_PEERING" - address_type = "INTERNAL" - prefix_length = 16 - network = google_compute_network.foobar.self_link -} -`, networkName, addressRangeName) -} From 11931bf1b8418f0ecd77e831933a86170d54723d Mon Sep 17 00:00:00 2001 From: Paddy Date: Tue, 7 Jan 2020 13:37:24 -0800 Subject: [PATCH 09/19] Update CHANGELOG.md --- CHANGELOG.md | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b3c4a32114..d49dadb10a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,48 @@ -## 3.3.1 (Unreleased) +## 3.4.1 (Unreleased) +## 3.4.0 (January 07, 2020) + +DEPRECATIONS: +* kms: deprecated `data.google_kms_secret_ciphertext` as there was no way to make it idempotent. Instead, use the `google_kms_secret_ciphertext` resource. ([#5314](https://github.com/terraform-providers/terraform-provider-google/pull/5314)) + +BREAKING CHANGES: +* cloudrun: Changed `google_cloud_run_domain_mapping` to correctly match Cloud Run API expected format for `spec.route_name`, {serviceName}, instead of invalid projects/{project}/global/services/{serviceName} ([#5264](https://github.com/terraform-providers/terraform-provider-google/pull/5264)) +* compute: Added back ConflictsWith restrictions for ExactlyOneOf restrictions that were removed in v3.3.0 for `google_compute_firewall`, `google_compute_health_check`, and `google_compute_region_health_check`. This effectively changes an API-side failure that was only accessible in v3.3.0 to a plan-time one. 
([#5220](https://github.com/terraform-providers/terraform-provider-google/pull/5220)) +* logging: Changed `google_logging_metric.metric_descriptors.labels` from a list to a set ([#5258](https://github.com/terraform-providers/terraform-provider-google/pull/5258)) +* resourcemanager: Added back ConflictsWith restrictions for ExactlyOneOf restrictions that were removed in v3.3.0 for `google_organization_policy`, `google_folder_organization_policy`, and `google_project_organization_policy`. This effectively changes an API-side failure that was only accessible in v3.3.0 to a plan-time one. ([#5220](https://github.com/terraform-providers/terraform-provider-google/pull/5220)) + +FEATURES: +* **New Data Source:** google_sql_ca_certs ([#5306](https://github.com/terraform-providers/terraform-provider-google/pull/5306)) +* **New Resource:** `google_identity_platform_default_supported_idp_config` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_identity_platform_inbound_saml_config` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_identity_platform_oauth_idp_config` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_identity_platform_tenant_default_supported_idp_config` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_identity_platform_tenant_inbound_saml_config` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_identity_platform_tenant_oauth_idp_config` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_identity_platform_tenant` ([#5199](https://github.com/terraform-providers/terraform-provider-google/pull/5199)) +* **New Resource:** `google_kms_crypto_key_iam_policy` 
([#5247](https://github.com/terraform-providers/terraform-provider-google/pull/5247)) +* **New Resource:** `google_kms_secret_ciphertext` ([#5314](https://github.com/terraform-providers/terraform-provider-google/pull/5314)) + +IMPROVEMENTS: +* composer: Increased default timeouts for `google_composer_environment` ([#5223](https://github.com/terraform-providers/terraform-provider-google/pull/5223)) +* compute: Added graceful termination to `container_cluster` create calls so that partially created clusters will resume the original operation if the Terraform process is killed mid create. ([#5217](https://github.com/terraform-providers/terraform-provider-google/pull/5217)) +* compute: Fixed `google_compute_disk_resource_policy_attachment` parsing of region from zone to allow for provider-level zone and make error message more accurate` ([#5257](https://github.com/terraform-providers/terraform-provider-google/pull/5257)) +* provider: Reduced default `send_after` controlling the time interval after which a batched request sends. ([#5268](https://github.com/terraform-providers/terraform-provider-google/pull/5268)) + +BUG FIXES: +* all: fixed issue where many fields that were removed in 3.0.0 would show a diff when they were removed from config ([#5313](https://github.com/terraform-providers/terraform-provider-google/pull/5313)) +* bigquery: fixed `bigquery_table.encryption_configuration` to correctly recreate the table when modified ([#5321](https://github.com/terraform-providers/terraform-provider-google/pull/5321)) +* cloudrun: Changed `google_cloud_run_domain_mapping` to correctly match Cloud Run API expected format for `spec.route_name`, {serviceName}, instead of invalid projects/{project}/global/services/{serviceName} ([#5264](https://github.com/terraform-providers/terraform-provider-google/pull/5264)) +* cloudrun: Changed `cloud_run_domain_mapping` to poll for success or failure and throw an appropriate error when ready status returns as false. 
([#5267](https://github.com/terraform-providers/terraform-provider-google/pull/5267)) +* cloudrun: Fixed `google_cloudrun_service` to allow update instead of force-recreation for changes in `spec` `env` and `command` fields ([#5269](https://github.com/terraform-providers/terraform-provider-google/pull/5269)) +* cloudrun: Removed unsupported update for `google_cloud_run_domain_mapping` to allow force-recreation. ([#5253](https://github.com/terraform-providers/terraform-provider-google/pull/5253)) +* cloudrun: Stopped returning an error when a `cloud_run_domain_mapping` was waiting on DNS verification. ([#5315](https://github.com/terraform-providers/terraform-provider-google/pull/5315)) +* compute: Fixed `google_compute_backend_service` to allow updating `cdn_policy.cache_key_policy.*` fields to false or empty. ([#5276](https://github.com/terraform-providers/terraform-provider-google/pull/5276)) +* compute: Fixed behaviour where `google_compute_subnetwork` did not record a value for `name` when `self_link` was specified. 
([#5288](https://github.com/terraform-providers/terraform-provider-google/pull/5288)) +* container: fixed issue where an empty variable in `tags` would cause a crash ([#5226](https://github.com/terraform-providers/terraform-provider-google/pull/5226)) +* endpoints: Added operation wait for `google_endpoints_service` to fix 403 "Service not found" errors during initial creation ([#5259](https://github.com/terraform-providers/terraform-provider-google/pull/5259)) +* logging: Made `google_logging_metric.metric_descriptors.labels` a set to prevent diff from ordering ([#5258](https://github.com/terraform-providers/terraform-provider-google/pull/5258)) +* resourcemanager: added retries for `data.google_organization` ([#5246](https://github.com/terraform-providers/terraform-provider-google/pull/5246)) + ## 3.3.0 (December 17, 2019) FEATURES: From 2c3f94b474b6a8d5b38a309c24ab0ebf6969bce2 Mon Sep 17 00:00:00 2001 From: The Magician Date: Tue, 7 Jan 2020 13:47:01 -0800 Subject: [PATCH 10/19] fix docs for google_bigquery_default_service_account (#5329) Signed-off-by: Modular Magician Co-authored-by: Martin Nowak --- ...tml => google_bigquery_default_service_account.html.markdown} | 1 + 1 file changed, 1 insertion(+) rename website/docs/d/{google_bigquery_default_service_account.html => google_bigquery_default_service_account.html.markdown} (97%) diff --git a/website/docs/d/google_bigquery_default_service_account.html b/website/docs/d/google_bigquery_default_service_account.html.markdown similarity index 97% rename from website/docs/d/google_bigquery_default_service_account.html rename to website/docs/d/google_bigquery_default_service_account.html.markdown index 4fb7ea51756..c9717c7ef3a 100644 --- a/website/docs/d/google_bigquery_default_service_account.html +++ b/website/docs/d/google_bigquery_default_service_account.html.markdown @@ -1,4 +1,5 @@ --- +subcategory: "BigQuery" layout: "google" page_title: "Google: google_bigquery_default_service_account" sidebar_current: 
"docs-google-datasource-bigquery-default-service-account" From f79d431bb2bbf109bb76c9375cc3624a05fc55ad Mon Sep 17 00:00:00 2001 From: The Magician Date: Tue, 7 Jan 2020 14:10:25 -0800 Subject: [PATCH 11/19] Nil return for absent Bigtable resources (#5331) Signed-off-by: Modular Magician Co-authored-by: Brian Hildebrandt --- google/resource_bigtable_gc_policy.go | 2 +- google/resource_bigtable_instance.go | 2 +- google/resource_bigtable_table.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/google/resource_bigtable_gc_policy.go b/google/resource_bigtable_gc_policy.go index d77f9ea1cac..aa40d98ccfc 100644 --- a/google/resource_bigtable_gc_policy.go +++ b/google/resource_bigtable_gc_policy.go @@ -151,7 +151,7 @@ func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) erro if err != nil { log.Printf("[WARN] Removing %s because it's gone", name) d.SetId("") - return fmt.Errorf("Error retrieving table. Could not find %s in %s. %s", name, instanceName, err) + return nil } for _, fi := range ti.FamilyInfos { diff --git a/google/resource_bigtable_instance.go b/google/resource_bigtable_instance.go index 3340a7099b6..e6212ffbbf0 100644 --- a/google/resource_bigtable_instance.go +++ b/google/resource_bigtable_instance.go @@ -159,7 +159,7 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro if err != nil { log.Printf("[WARN] Removing %s because it's gone", instanceName) d.SetId("") - return fmt.Errorf("Error retrieving instance. Could not find %s. 
%s", instanceName, err) + return nil } d.Set("project", project) diff --git a/google/resource_bigtable_table.go b/google/resource_bigtable_table.go index 5a24e522402..4c63a6dbf19 100644 --- a/google/resource_bigtable_table.go +++ b/google/resource_bigtable_table.go @@ -142,7 +142,7 @@ func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { if err != nil { log.Printf("[WARN] Removing %s because it's gone", name) d.SetId("") - return fmt.Errorf("Error retrieving table. Could not find %s in %s. %s", name, instanceName, err) + return nil } d.Set("project", project) From 598fe6a5fcebd97bbbbc0eba5aa03b7f59a4e265 Mon Sep 17 00:00:00 2001 From: megan07 Date: Wed, 8 Jan 2020 19:05:36 +0000 Subject: [PATCH 12/19] add lifecycle_config to dataproc_cluster.cluster_config Signed-off-by: Modular Magician --- google/resource_dataproc_cluster.go | 1 - 1 file changed, 1 deletion(-) diff --git a/google/resource_dataproc_cluster.go b/google/resource_dataproc_cluster.go index ffb24ea2a68..6b899b24eac 100644 --- a/google/resource_dataproc_cluster.go +++ b/google/resource_dataproc_cluster.go @@ -701,7 +701,6 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err log.Printf("[INFO] Dataproc cluster %s has been created", cluster.ClusterName) return resourceDataprocClusterRead(d, meta) - } func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.ClusterConfig, error) { From 4018ee13d67059726d758e266d67c2a49e102e26 Mon Sep 17 00:00:00 2001 From: The Magician Date: Wed, 8 Jan 2020 12:41:59 -0800 Subject: [PATCH 13/19] Add warnings about custom role format for IAM bindings (#5335) Signed-off-by: Modular Magician Co-authored-by: emily --- google/resource_iam_binding.go | 1 + google/resource_sql_database_instance_test.go | 1 + website/docs/r/bigtable_instance_iam.html.markdown | 3 +++ .../docs/r/binary_authorization_attestor_iam.html.markdown | 3 +++ website/docs/r/cloud_run_service_iam.html.markdown | 3 +++ 
.../docs/r/cloudfunctions_cloud_function_iam.html.markdown | 3 +++ website/docs/r/compute_instance_iam.html.markdown | 3 +++ website/docs/r/compute_subnetwork_iam.html.markdown | 3 +++ website/docs/r/dataproc_cluster_iam.html.markdown | 3 +++ website/docs/r/dataproc_job_iam.html.markdown | 3 +++ .../docs/r/google_billing_account_iam_member.html.markdown | 3 +++ website/docs/r/google_folder_iam_binding.html.markdown | 3 +++ website/docs/r/google_folder_iam_member.html.markdown | 3 +++ website/docs/r/google_organization_iam_binding.md | 3 +++ website/docs/r/google_organization_iam_member.html.markdown | 3 +++ website/docs/r/google_project_iam.html.markdown | 4 ++++ website/docs/r/google_service_account_iam.html.markdown | 3 +++ website/docs/r/healthcare_dataset_iam.html.markdown | 3 +++ website/docs/r/iap_app_engine_service_iam.html.markdown | 3 +++ website/docs/r/iap_app_engine_version_iam.html.markdown | 3 +++ website/docs/r/iap_web_backend_service_iam.html.markdown | 3 +++ website/docs/r/iap_web_iam.html.markdown | 3 +++ website/docs/r/iap_web_type_app_engine_iam.html.markdown | 3 +++ website/docs/r/iap_web_type_compute_iam.html.markdown | 3 +++ website/docs/r/pubsub_subscription_iam.html.markdown | 3 +++ website/docs/r/pubsub_topic_iam.html.markdown | 3 +++ website/docs/r/runtimeconfig_config_iam.html.markdown | 3 +++ website/docs/r/sourcerepo_repository_iam.html.markdown | 3 +++ website/docs/r/spanner_database_iam.html.markdown | 4 ++++ website/docs/r/spanner_instance_iam.html.markdown | 3 +++ website/docs/r/storage_bucket_iam.html.markdown | 3 +++ 31 files changed, 91 insertions(+) diff --git a/google/resource_iam_binding.go b/google/resource_iam_binding.go index b98d25a09b6..902685f3429 100644 --- a/google/resource_iam_binding.go +++ b/google/resource_iam_binding.go @@ -110,6 +110,7 @@ func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Rea } if binding == nil { + log.Printf("[WARNING] Binding for role %q not found, assuming it has no 
members. If you expected existing members bound for this role, make sure your role is correctly formatted.", eBinding.Role) log.Printf("[DEBUG] Binding for role %q and condition %+v not found in policy for %s, assuming it has no members.", eBinding.Role, eCondition, updater.DescribeResource()) d.Set("role", eBinding.Role) d.Set("members", nil) diff --git a/google/resource_sql_database_instance_test.go b/google/resource_sql_database_instance_test.go index 11c939db0d2..069037b74f2 100644 --- a/google/resource_sql_database_instance_test.go +++ b/google/resource_sql_database_instance_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" + sqladmin "google.golang.org/api/sqladmin/v1beta4" ) diff --git a/website/docs/r/bigtable_instance_iam.html.markdown b/website/docs/r/bigtable_instance_iam.html.markdown index 1fc149aff35..73e89a320eb 100644 --- a/website/docs/r/bigtable_instance_iam.html.markdown +++ b/website/docs/r/bigtable_instance_iam.html.markdown @@ -107,3 +107,6 @@ $ terraform import google_bigtable_instance_iam_binding.editor "projects/{projec $ terraform import google_bigtable_instance_iam_member.editor "projects/{project}/instances/{instance} roles/editor user:jane@example.com" ``` + +-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
diff --git a/website/docs/r/binary_authorization_attestor_iam.html.markdown b/website/docs/r/binary_authorization_attestor_iam.html.markdown index 4a48ed18b39..db207a34baa 100644 --- a/website/docs/r/binary_authorization_attestor_iam.html.markdown +++ b/website/docs/r/binary_authorization_attestor_iam.html.markdown @@ -138,6 +138,9 @@ $ terraform import google_binary_authorization_attestor_iam_policy.editor projec -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/cloud_run_service_iam.html.markdown b/website/docs/r/cloud_run_service_iam.html.markdown index 842339e14b8..02283820875 100644 --- a/website/docs/r/cloud_run_service_iam.html.markdown +++ b/website/docs/r/cloud_run_service_iam.html.markdown @@ -143,6 +143,9 @@ $ terraform import google_cloud_run_service_iam_policy.editor projects/{{project -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown b/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown index 5501bfea688..782c7fa544a 100644 --- a/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown +++ b/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown @@ -145,6 +145,9 @@ $ terraform import google_cloudfunctions_function_iam_policy.editor projects/{{p -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/compute_instance_iam.html.markdown b/website/docs/r/compute_instance_iam.html.markdown index 91e165166dc..dec0c356b50 100644 --- a/website/docs/r/compute_instance_iam.html.markdown +++ b/website/docs/r/compute_instance_iam.html.markdown @@ -145,6 +145,9 @@ $ terraform import google_compute_instance_iam_policy.editor projects/{{project} -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/compute_subnetwork_iam.html.markdown b/website/docs/r/compute_subnetwork_iam.html.markdown index 715dc05407e..83de671c212 100644 --- a/website/docs/r/compute_subnetwork_iam.html.markdown +++ b/website/docs/r/compute_subnetwork_iam.html.markdown @@ -146,6 +146,9 @@ $ terraform import google_compute_subnetwork_iam_policy.editor projects/{{projec -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/dataproc_cluster_iam.html.markdown b/website/docs/r/dataproc_cluster_iam.html.markdown index 8e9dbbe84cc..707f9d532f2 100644 --- a/website/docs/r/dataproc_cluster_iam.html.markdown +++ b/website/docs/r/dataproc_cluster_iam.html.markdown @@ -111,3 +111,6 @@ $ terraform import google_dataproc_cluster_iam_binding.editor "projects/{project $ terraform import google_dataproc_cluster_iam_member.editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com" ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. 
diff --git a/website/docs/r/dataproc_job_iam.html.markdown b/website/docs/r/dataproc_job_iam.html.markdown index 0cdea119c9d..8fc24c1cd23 100644 --- a/website/docs/r/dataproc_job_iam.html.markdown +++ b/website/docs/r/dataproc_job_iam.html.markdown @@ -111,3 +111,6 @@ $ terraform import google_dataproc_job_iam_binding.editor "projects/{project}/re $ terraform import google_dataproc_job_iam_member.editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com" ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/google_billing_account_iam_member.html.markdown b/website/docs/r/google_billing_account_iam_member.html.markdown index 68e27a0e81d..2576a765576 100644 --- a/website/docs/r/google_billing_account_iam_member.html.markdown +++ b/website/docs/r/google_billing_account_iam_member.html.markdown @@ -50,3 +50,6 @@ IAM member imports use space-delimited identifiers; the resource in question, th ``` $ terraform import google_billing_account_iam_member.binding "your-billing-account-id roles/viewer user:foo@example.com" ``` + +-> **Custom Roles**: If you're importing a IAM member with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. 
diff --git a/website/docs/r/google_folder_iam_binding.html.markdown b/website/docs/r/google_folder_iam_binding.html.markdown index 8b8a453fffa..bb824d1d97a 100644 --- a/website/docs/r/google_folder_iam_binding.html.markdown +++ b/website/docs/r/google_folder_iam_binding.html.markdown @@ -70,3 +70,6 @@ IAM binding imports use space-delimited identifiers; first the resource in quest ``` $ terraform import google_folder_iam_binding.viewer "folder-name roles/viewer" ``` + +-> **Custom Roles**: If you're importing a IAM binding with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/google_folder_iam_member.html.markdown b/website/docs/r/google_folder_iam_member.html.markdown index bf01ecaf8a9..6f3d79a89ae 100644 --- a/website/docs/r/google_folder_iam_member.html.markdown +++ b/website/docs/r/google_folder_iam_member.html.markdown @@ -62,3 +62,6 @@ IAM member imports use space-delimited identifiers; the resource in question, th ``` $ terraform import google_folder_iam_member.my_project "folder-name roles/viewer user:foo@example.com" ``` + +-> **Custom Roles**: If you're importing a IAM member with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/google_organization_iam_binding.md b/website/docs/r/google_organization_iam_binding.md index b387c18c231..f00196b5b9b 100644 --- a/website/docs/r/google_organization_iam_binding.md +++ b/website/docs/r/google_organization_iam_binding.md @@ -59,3 +59,6 @@ IAM binding imports use space-delimited identifiers; first the resource in quest ``` $ terraform import google_organization_iam_binding.my_org "your-org-id roles/viewer" ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. 
`[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/google_organization_iam_member.html.markdown b/website/docs/r/google_organization_iam_member.html.markdown index df547c2937c..d88733f6633 100644 --- a/website/docs/r/google_organization_iam_member.html.markdown +++ b/website/docs/r/google_organization_iam_member.html.markdown @@ -51,3 +51,6 @@ IAM member imports use space-delimited identifiers; the resource in question, th ``` $ terraform import google_organization_iam_member.my_org "your-org-id roles/viewer user:foo@example.com" ``` + +-> **Custom Roles**: If you're importing a IAM member with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/google_project_iam.html.markdown b/website/docs/r/google_project_iam.html.markdown index bf29aa11d73..89ac8006ca7 100644 --- a/website/docs/r/google_project_iam.html.markdown +++ b/website/docs/r/google_project_iam.html.markdown @@ -235,3 +235,7 @@ IAM audit config imports use the identifier of the resource in question and the ``` terraform import google_project_iam_audit_config.my_project "your-project-id foo.googleapis.com" ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. 
+ diff --git a/website/docs/r/google_service_account_iam.html.markdown b/website/docs/r/google_service_account_iam.html.markdown index 9c45fb7da72..8f87bd776ca 100644 --- a/website/docs/r/google_service_account_iam.html.markdown +++ b/website/docs/r/google_service_account_iam.html.markdown @@ -189,6 +189,9 @@ $ terraform import google_service_account_iam_binding.admin-account-iam "project $ terraform import google_service_account_iam_member.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} roles/editor user:foo@example.com" ``` +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the +full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + With conditions: ``` $ terraform import -provider=google-beta google_service_account_iam_binding.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} iam.serviceAccountUser expires_after_2019_12_31" diff --git a/website/docs/r/healthcare_dataset_iam.html.markdown b/website/docs/r/healthcare_dataset_iam.html.markdown index 37b418c4f16..4c25184171c 100644 --- a/website/docs/r/healthcare_dataset_iam.html.markdown +++ b/website/docs/r/healthcare_dataset_iam.html.markdown @@ -115,3 +115,6 @@ IAM policy imports use the identifier of the resource in question. This policy ``` $ terraform import google_healthcare_dataset_iam_policy.dataset_iam your-project-id/location-name/dataset-name ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. 
diff --git a/website/docs/r/iap_app_engine_service_iam.html.markdown b/website/docs/r/iap_app_engine_service_iam.html.markdown index 2441b8ac1c4..1c398313447 100644 --- a/website/docs/r/iap_app_engine_service_iam.html.markdown +++ b/website/docs/r/iap_app_engine_service_iam.html.markdown @@ -143,6 +143,9 @@ $ terraform import google_iap_app_engine_service_iam_policy.editor projects/{{pr -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/iap_app_engine_version_iam.html.markdown b/website/docs/r/iap_app_engine_version_iam.html.markdown index 8f776a94cb1..eb6f1d4f4be 100644 --- a/website/docs/r/iap_app_engine_version_iam.html.markdown +++ b/website/docs/r/iap_app_engine_version_iam.html.markdown @@ -147,6 +147,9 @@ $ terraform import google_iap_app_engine_version_iam_policy.editor projects/{{pr -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/iap_web_backend_service_iam.html.markdown b/website/docs/r/iap_web_backend_service_iam.html.markdown index 2976fee50a3..e9552538036 100644 --- a/website/docs/r/iap_web_backend_service_iam.html.markdown +++ b/website/docs/r/iap_web_backend_service_iam.html.markdown @@ -138,6 +138,9 @@ $ terraform import google_iap_web_backend_service_iam_policy.editor projects/{{p -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/iap_web_iam.html.markdown b/website/docs/r/iap_web_iam.html.markdown index 87fba1a6363..9ccf5c7711c 100644 --- a/website/docs/r/iap_web_iam.html.markdown +++ b/website/docs/r/iap_web_iam.html.markdown @@ -133,6 +133,9 @@ $ terraform import google_iap_web_iam_policy.editor projects/{{project}}/iap_web -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/iap_web_type_app_engine_iam.html.markdown b/website/docs/r/iap_web_type_app_engine_iam.html.markdown index bd5836630fc..80fec55d830 100644 --- a/website/docs/r/iap_web_type_app_engine_iam.html.markdown +++ b/website/docs/r/iap_web_type_app_engine_iam.html.markdown @@ -138,6 +138,9 @@ $ terraform import google_iap_web_type_app_engine_iam_policy.editor projects/{{p -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/iap_web_type_compute_iam.html.markdown b/website/docs/r/iap_web_type_compute_iam.html.markdown index bb37997bd55..eb1ea3135b4 100644 --- a/website/docs/r/iap_web_type_compute_iam.html.markdown +++ b/website/docs/r/iap_web_type_compute_iam.html.markdown @@ -133,6 +133,9 @@ $ terraform import google_iap_web_type_compute_iam_policy.editor projects/{{proj -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/pubsub_subscription_iam.html.markdown b/website/docs/r/pubsub_subscription_iam.html.markdown index 5b137125231..960d1b8ca9c 100644 --- a/website/docs/r/pubsub_subscription_iam.html.markdown +++ b/website/docs/r/pubsub_subscription_iam.html.markdown @@ -104,3 +104,6 @@ $ terraform import google_pubsub_subscription_iam_binding.editor "projects/{your $ terraform import google_pubsub_subscription_iam_member.editor "projects/{your-project-id}/subscriptions/{your-subscription-name} roles/editor jane@example.com" ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/pubsub_topic_iam.html.markdown b/website/docs/r/pubsub_topic_iam.html.markdown index c78d54a0135..4f01964ccfe 100644 --- a/website/docs/r/pubsub_topic_iam.html.markdown +++ b/website/docs/r/pubsub_topic_iam.html.markdown @@ -138,6 +138,9 @@ $ terraform import google_pubsub_topic_iam_policy.editor projects/{{project}}/to -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/runtimeconfig_config_iam.html.markdown b/website/docs/r/runtimeconfig_config_iam.html.markdown index 090153cc210..09d82f8a808 100644 --- a/website/docs/r/runtimeconfig_config_iam.html.markdown +++ b/website/docs/r/runtimeconfig_config_iam.html.markdown @@ -138,6 +138,9 @@ $ terraform import google_runtimeconfig_config_iam_policy.editor projects/{{proj -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/docs/r/sourcerepo_repository_iam.html.markdown b/website/docs/r/sourcerepo_repository_iam.html.markdown index 34332f72aef..daf8d2d35f9 100644 --- a/website/docs/r/sourcerepo_repository_iam.html.markdown +++ b/website/docs/r/sourcerepo_repository_iam.html.markdown @@ -137,6 +137,9 @@ $ terraform import google_sourcerepo_repository_iam_policy.editor projects/{{pro -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). 
diff --git a/website/docs/r/spanner_database_iam.html.markdown b/website/docs/r/spanner_database_iam.html.markdown index d386ef2d101..a910b23838e 100644 --- a/website/docs/r/spanner_database_iam.html.markdown +++ b/website/docs/r/spanner_database_iam.html.markdown @@ -125,3 +125,7 @@ IAM policy imports use the identifier of the resource in question, e.g. ``` $ terraform import google_spanner_database_iam_policy.database project-name/instance-name/database-name ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + diff --git a/website/docs/r/spanner_instance_iam.html.markdown b/website/docs/r/spanner_instance_iam.html.markdown index 1b301502fc0..b2c953fad79 100644 --- a/website/docs/r/spanner_instance_iam.html.markdown +++ b/website/docs/r/spanner_instance_iam.html.markdown @@ -120,3 +120,6 @@ IAM policy imports use the identifier of the resource in question, e.g. ``` $ terraform import google_spanner_instance_iam_policy.instance project-name/instance-name ``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. diff --git a/website/docs/r/storage_bucket_iam.html.markdown b/website/docs/r/storage_bucket_iam.html.markdown index b27d1bc4d6c..9a108c3ac54 100644 --- a/website/docs/r/storage_bucket_iam.html.markdown +++ b/website/docs/r/storage_bucket_iam.html.markdown @@ -134,6 +134,9 @@ $ terraform import google_storage_bucket_iam_policy.editor b/{{bucket}} -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` as an argument so that Terraform uses the correct provider to import your resource. 
+-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + ## User Project Overrides This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). From f32dd69fb2d0cff81e33db7fc7404dd2839549bc Mon Sep 17 00:00:00 2001 From: The Magician Date: Wed, 8 Jan 2020 12:46:54 -0800 Subject: [PATCH 14/19] Add mutex to peering create (#5338) Signed-off-by: Modular Magician Co-authored-by: emily --- google/resource_compute_network_peering.go | 33 ++++++++++++++++------ 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/google/resource_compute_network_peering.go b/google/resource_compute_network_peering.go index 9f533a296f4..845ed1c065d 100644 --- a/google/resource_compute_network_peering.go +++ b/google/resource_compute_network_peering.go @@ -73,10 +73,22 @@ func resourceComputeNetworkPeeringCreate(d *schema.ResourceData, meta interface{ if err != nil { return err } + peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } request := &computeBeta.NetworksAddPeeringRequest{} request.NetworkPeering = expandNetworkPeering(d) + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. 
+ peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + mutexKV.Lock(kn) + defer mutexKV.Unlock(kn) + } + addOp, err := config.clientComputeBeta.Networks.AddPeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() if err != nil { return fmt.Errorf("Error adding network peering: %s", err) @@ -139,10 +151,13 @@ func resourceComputeNetworkPeeringDelete(d *schema.ResourceData, meta interface{ Name: name, } - // Only one delete peering operation at a time can be performed inside any peered VPCs. - peeringLockName := getNetworkPeeringLockName(networkFieldValue.Name, peerNetworkFieldValue.Name) - mutexKV.Lock(peeringLockName) - defer mutexKV.Unlock(peeringLockName) + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + mutexKV.Lock(kn) + defer mutexKV.Unlock(kn) + } removeOp, err := config.clientCompute.Networks.RemovePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() if err != nil { @@ -177,13 +192,15 @@ func expandNetworkPeering(d *schema.ResourceData) *computeBeta.NetworkPeering { } } -func getNetworkPeeringLockName(networkName, peerNetworkName string) string { +func sortedNetworkPeeringMutexKeys(networkName, peerNetworkName *GlobalFieldValue) []string { // Whether you delete the peering from network A to B or the one from B to A, they // cannot happen at the same time. 
- networks := []string{networkName, peerNetworkName} + networks := []string{ + fmt.Sprintf("%s/peerings", networkName.RelativeLink()), + fmt.Sprintf("%s/peerings", peerNetworkName.RelativeLink()), + } sort.Strings(networks) - - return fmt.Sprintf("network_peering/%s/%s", networks[0], networks[1]) + return networks } func resourceComputeNetworkPeeringImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { From 5957448c1849d80b422d73388d303714e20bb0bc Mon Sep 17 00:00:00 2001 From: The Magician Date: Wed, 8 Jan 2020 16:25:26 -0800 Subject: [PATCH 15/19] Add default_if_empty for quic_override (#5351) Signed-off-by: Modular Magician Co-authored-by: Riley Karson --- google/resource_compute_target_https_proxy.go | 3 +++ google/resource_sql_database_instance_test.go | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/google/resource_compute_target_https_proxy.go b/google/resource_compute_target_https_proxy.go index 9d3bce7c78b..241e571c3f3 100644 --- a/google/resource_compute_target_https_proxy.go +++ b/google/resource_compute_target_https_proxy.go @@ -457,6 +457,9 @@ func flattenComputeTargetHttpsProxyName(v interface{}, d *schema.ResourceData) i } func flattenComputeTargetHttpsProxyQuicOverride(v interface{}, d *schema.ResourceData) interface{} { + if v == nil || v.(string) == "" { + return "NONE" + } return v } diff --git a/google/resource_sql_database_instance_test.go b/google/resource_sql_database_instance_test.go index 069037b74f2..11c939db0d2 100644 --- a/google/resource_sql_database_instance_test.go +++ b/google/resource_sql_database_instance_test.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" - sqladmin "google.golang.org/api/sqladmin/v1beta4" ) From 4d6a8d54ec78fcdeec5d29f771f4af2998602443 Mon Sep 17 00:00:00 2001 From: Chris Stephens Date: Thu, 9 Jan 2020 00:11:46 
+0000 Subject: [PATCH 16/19] Batch errors now indicate how to disable batching Signed-off-by: Modular Magician --- google/batcher.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/google/batcher.go b/google/batcher.go index 9f1a0ecdcb9..6687c447d96 100644 --- a/google/batcher.go +++ b/google/batcher.go @@ -157,7 +157,10 @@ func (b *RequestBatcher) SendRequestWithTimeout(batchKey string, request *BatchR case resp := <-respCh: if resp.err != nil { // use wrapf so we can potentially extract the original error type - return nil, errwrap.Wrapf(fmt.Sprintf("Batch %q for request %q returned error: {{err}}", batchKey, request.DebugId), resp.err) + errMsg := fmt.Sprintf( + "Batch %q for request %q returned error: {{err}}. To debug individual requests, try disabling batching: https://www.terraform.io/docs/providers/google/guides/provider_reference.html#enable_batching", + batchKey, request.DebugId) + return nil, errwrap.Wrapf(errMsg, resp.err) } return resp.body, nil case <-ctx.Done(): From a3214a0baad67d28bdc666ecade895775744fc31 Mon Sep 17 00:00:00 2001 From: The Magician Date: Thu, 9 Jan 2020 12:43:10 -0800 Subject: [PATCH 17/19] Add default_if_empty to google_compute_router_nat defaults (#5353) Signed-off-by: Modular Magician Co-authored-by: Riley Karson --- ...source_access_context_manager_access_level.go | 3 ++- ...e_access_context_manager_service_perimeter.go | 3 ++- google/resource_compute_address.go | 3 ++- google/resource_compute_router_nat.go | 16 ++++++++++++++++ google/resource_compute_target_https_proxy.go | 3 ++- google/resource_dns_managed_zone.go | 3 ++- google/resource_logging_metric.go | 3 ++- 7 files changed, 28 insertions(+), 6 deletions(-) diff --git a/google/resource_access_context_manager_access_level.go b/google/resource_access_context_manager_access_level.go index 0594dc0ad95..dfc2496102b 100644 --- a/google/resource_access_context_manager_access_level.go +++ b/google/resource_access_context_manager_access_level.go @@ -462,9 
+462,10 @@ func flattenAccessContextManagerAccessLevelBasic(v interface{}, d *schema.Resour return []interface{}{transformed} } func flattenAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d *schema.ResourceData) interface{} { - if v == nil || v.(string) == "" { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { return "AND" } + return v } diff --git a/google/resource_access_context_manager_service_perimeter.go b/google/resource_access_context_manager_service_perimeter.go index 9f91c189d9a..990630d4e03 100644 --- a/google/resource_access_context_manager_service_perimeter.go +++ b/google/resource_access_context_manager_service_perimeter.go @@ -407,9 +407,10 @@ func flattenAccessContextManagerServicePerimeterUpdateTime(v interface{}, d *sch } func flattenAccessContextManagerServicePerimeterPerimeterType(v interface{}, d *schema.ResourceData) interface{} { - if v == nil || v.(string) == "" { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { return "PERIMETER_TYPE_REGULAR" } + return v } diff --git a/google/resource_compute_address.go b/google/resource_compute_address.go index d627d470946..e8a831e1300 100644 --- a/google/resource_compute_address.go +++ b/google/resource_compute_address.go @@ -353,9 +353,10 @@ func flattenComputeAddressAddress(v interface{}, d *schema.ResourceData) interfa } func flattenComputeAddressAddressType(v interface{}, d *schema.ResourceData) interface{} { - if v == nil || v.(string) == "" { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { return "EXTERNAL" } + return v } diff --git a/google/resource_compute_router_nat.go b/google/resource_compute_router_nat.go index fa8c82a4306..dd5c3d07275 100644 --- a/google/resource_compute_router_nat.go +++ b/google/resource_compute_router_nat.go @@ -721,42 +721,58 @@ func flattenComputeRouterNatMinPortsPerVm(v interface{}, d *schema.ResourceData) } func flattenComputeRouterNatUdpIdleTimeoutSec(v interface{}, d *schema.ResourceData) interface{} { + if v == nil || 
isEmptyValue(reflect.ValueOf(v)) { + return 30 + } // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. } + return v } func flattenComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d *schema.ResourceData) interface{} { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { + return 30 + } // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. } + return v } func flattenComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d *schema.ResourceData) interface{} { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { + return 1200 + } // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. } + return v } func flattenComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d *schema.ResourceData) interface{} { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { + return 30 + } // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. 
} + return v } diff --git a/google/resource_compute_target_https_proxy.go b/google/resource_compute_target_https_proxy.go index 241e571c3f3..3b7777f6b2e 100644 --- a/google/resource_compute_target_https_proxy.go +++ b/google/resource_compute_target_https_proxy.go @@ -457,9 +457,10 @@ func flattenComputeTargetHttpsProxyName(v interface{}, d *schema.ResourceData) i } func flattenComputeTargetHttpsProxyQuicOverride(v interface{}, d *schema.ResourceData) interface{} { - if v == nil || v.(string) == "" { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { return "NONE" } + return v } diff --git a/google/resource_dns_managed_zone.go b/google/resource_dns_managed_zone.go index 67473e8a7f1..0a937382f63 100644 --- a/google/resource_dns_managed_zone.go +++ b/google/resource_dns_managed_zone.go @@ -544,9 +544,10 @@ func flattenDNSManagedZoneLabels(v interface{}, d *schema.ResourceData) interfac } func flattenDNSManagedZoneVisibility(v interface{}, d *schema.ResourceData) interface{} { - if v == nil || v.(string) == "" { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { return "public" } + return v } diff --git a/google/resource_logging_metric.go b/google/resource_logging_metric.go index 87e6dab1f33..e69bff67fd8 100644 --- a/google/resource_logging_metric.go +++ b/google/resource_logging_metric.go @@ -586,9 +586,10 @@ func flattenLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d *sch } func flattenLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d *schema.ResourceData) interface{} { - if v == nil || v.(string) == "" { + if v == nil || isEmptyValue(reflect.ValueOf(v)) { return "STRING" } + return v } From c7b7470c48e033ce701aa329c8030822c557a735 Mon Sep 17 00:00:00 2001 From: The Magician Date: Thu, 9 Jan 2020 12:49:35 -0800 Subject: [PATCH 18/19] Allow for retries of single requests in a batch on failure (#5355) Signed-off-by: Modular Magician Co-authored-by: emily --- google/batcher.go | 199 ++++++++++++++-------- google/batcher_test.go | 57 +++++-- 
google/iam_batching.go | 2 +- google/resource_google_project_service.go | 2 +- google/serviceusage_batching.go | 49 +++--- 5 files changed, 195 insertions(+), 114 deletions(-) diff --git a/google/batcher.go b/google/batcher.go index 6687c447d96..972b12c8b01 100644 --- a/google/batcher.go +++ b/google/batcher.go @@ -3,21 +3,19 @@ package google import ( "context" "fmt" + "github.com/hashicorp/errwrap" "log" "sync" "time" - - "github.com/hashicorp/errwrap" ) const defaultBatchSendIntervalSec = 3 -// RequestBatcher is a global batcher object that keeps track of -// existing batches. -// In general, a batcher should be created per service that requires batching -// in order to prevent blocking batching for one service due to another, -// and to minimize the possibility of overlap in batchKey formats -// (see SendRequestWithTimeout) +// RequestBatcher keeps track of batched requests globally. +// It should be created at a provider level. In general, one +// should be created per service that requires batching to: +// - prevent blocking batching for one service due to another, +// - minimize the possibility of overlap in batchKey formats (see SendRequestWithTimeout) type RequestBatcher struct { sync.Mutex @@ -27,39 +25,41 @@ type RequestBatcher struct { debugId string } -// BatchRequest represents a single request to a global batcher. -type BatchRequest struct { - // ResourceName represents the underlying resource for which - // a request is made. Its format is determined by what SendF expects, but - // typically should be the name of the parent GCP resource being changed. - ResourceName string - - // Body is this request's data to be passed to SendF, and may be combined - // with other bodies using CombineF. - Body interface{} - - // CombineF function determines how to combine bodies from two batches. - CombineF batcherCombineFunc - - // SendF function determines how to actually send a batched request to a - // third party service. 
The arguments given to this function are - // (ResourceName, Body) where Body may have been combined with other request - // Bodies. - SendF batcherSendFunc - - // ID for debugging request. This should be specific to a single request - // (i.e. per Terraform resource) - DebugId string -} - // These types are meant to be the public interface to batchers. They define -// logic to manage batch data type and behavior, and require service-specific -// implementations per type of request per service. -// Function type for combine existing batches and additional batch data -type batcherCombineFunc func(body interface{}, toAdd interface{}) (interface{}, error) +// batch data format and logic to send/combine batches, i.e. they require +// specific implementations per type of request. +type ( + // BatchRequest represents a single request to a global batcher. + BatchRequest struct { + // ResourceName represents the underlying resource for which + // a request is made. Its format is determined by what SendF expects, but + // typically should be the name of the parent GCP resource being changed. + ResourceName string + + // Body is this request's data to be passed to SendF, and may be combined + // with other bodies using CombineF. + Body interface{} + + // CombineF function determines how to combine bodies from two batches. + CombineF BatcherCombineFunc + + // SendF function determines how to actually send a batched request to a + // third party service. The arguments given to this function are + // (ResourceName, Body) where Body may have been combined with other request + // Bodies. + SendF BatcherSendFunc + + // ID for debugging request. This should be specific to a single request + // (i.e. 
per Terraform resource) + DebugId string + } -// Function type for sending a batch request -type batcherSendFunc func(resourceName string, body interface{}) (interface{}, error) + // BatcherCombineFunc is a function type for combine existing batches and additional batch data + BatcherCombineFunc func(body interface{}, toAdd interface{}) (interface{}, error) + + // BatcherSendFunc is a function type for sending a batch request + BatcherSendFunc func(resourceName string, body interface{}) (interface{}, error) +) // batchResponse bundles an API response (data, error) tuple. type batchResponse struct { @@ -67,16 +67,32 @@ type batchResponse struct { err error } -// startedBatch refers to a processed batch whose timer to send the request has -// already been started. The responses for the request is sent to each listener -// channel, representing parallel callers that are waiting on requests -// combined into this batch. +func (br *batchResponse) IsError() bool { + return br.err != nil +} + +// startedBatch refers to a registered batch to group batch requests coming in. +// The timer manages the time after which a given batch is sent. type startedBatch struct { batchKey string + + // Combined Batch Request *BatchRequest - listeners []chan batchResponse - timer *time.Timer + // subscribers is a registry of the requests (batchSubscriber) combined into this batcher. + + subscribers []batchSubscriber + + timer *time.Timer +} + +// batchSubscriber contains information required for a single request for a startedBatch. +type batchSubscriber struct { + // singleRequest is the original request this subscriber represents + singleRequest *BatchRequest + + // respCh is the channel created to communicate the result to a waiting goroutine.s + respCh chan batchResponse } // batchingConfig contains user configuration for controlling batch requests. 
@@ -94,8 +110,12 @@ func NewRequestBatcher(debugId string, ctx context.Context, config *batchingConf batches: make(map[string]*startedBatch), } + // Start goroutine to managing stopping the batcher if the provider-level parent context is closed. go func(b *RequestBatcher) { - <-ctx.Done() + // Block until parent context is closed + <-b.parentCtx.Done() + + log.Printf("[DEBUG] parent context canceled, cleaning up batcher batches") b.stop() }(batcher) @@ -108,19 +128,19 @@ func (b *RequestBatcher) stop() { log.Printf("[DEBUG] Stopping batcher %q", b.debugId) for batchKey, batch := range b.batches { - log.Printf("[DEBUG] Cleaning up batch request %q", batchKey) + log.Printf("[DEBUG] Cancelling started batch for batchKey %q", batchKey) batch.timer.Stop() - for _, l := range batch.listeners { - close(l) + for _, l := range batch.subscribers { + close(l.respCh) } } } -// SendRequestWithTimeout is expected to be called per parallel call. -// It manages waiting on the result of a batch request. +// SendRequestWithTimeout is a blocking call for making a single request, run alone or as part of a batch. +// It manages registering the single request with the batcher and waiting on the result. // -// Batch requests are grouped by the given batchKey. batchKey -// should be unique to the API request being sent, most likely similar to +// Params: +// batchKey: A string to group batchable requests. It should be unique to the API request being sent, similar to // the HTTP request URL with GCP resource ID included in the URL (the caller // may choose to use a key with method if needed to diff GET/read and // POST/create) @@ -182,40 +202,75 @@ func (b *RequestBatcher) registerBatchRequest(batchKey string, newRequest *Batch return batch.addRequest(newRequest) } + // Batch doesn't exist for given batch key - create a new batch. 
+ log.Printf("[DEBUG] Creating new batch %q from request %q", newRequest.DebugId, batchKey) + // The calling goroutine will need a channel to wait on for a response. respCh := make(chan batchResponse, 1) + sub := batchSubscriber{ + singleRequest: newRequest, + respCh: respCh, + } - // Create a new batch. + // Create a new batch with copy of the given batch request. b.batches[batchKey] = &startedBatch{ - BatchRequest: newRequest, - batchKey: batchKey, - listeners: []chan batchResponse{respCh}, + BatchRequest: &BatchRequest{ + ResourceName: newRequest.ResourceName, + Body: newRequest.Body, + CombineF: newRequest.CombineF, + SendF: newRequest.SendF, + DebugId: fmt.Sprintf("Combined batch for started batch %q", batchKey), + }, + batchKey: batchKey, + subscribers: []batchSubscriber{sub}, } // Start a timer to send the request b.batches[batchKey].timer = time.AfterFunc(b.sendAfter, func() { batch := b.popBatch(batchKey) - - var resp batchResponse if batch == nil { - log.Printf("[DEBUG] Batch not found in saved batches, running single request batch %q", batchKey) - resp = newRequest.send() + log.Printf("[ERROR] batch should have been added to saved batches - just run as single request %q", newRequest.DebugId) + respCh <- newRequest.send() + close(respCh) } else { - log.Printf("[DEBUG] Sending batch %q combining %d requests)", batchKey, len(batch.listeners)) - resp = batch.send() - } - - // Send message to all goroutines waiting on result. - for _, ch := range batch.listeners { - ch <- resp - close(ch) + b.sendBatchWithSingleRetry(batchKey, batch) } }) return respCh, nil } +func (b *RequestBatcher) sendBatchWithSingleRetry(batchKey string, batch *startedBatch) { + log.Printf("[DEBUG] Sending batch %q combining %d requests)", batchKey, len(batch.subscribers)) + resp := batch.send() + + // If the batch failed and combines more than one request, retry each single request. 
+ if resp.IsError() && len(batch.subscribers) > 1 { + log.Printf("[DEBUG] Batch failed with error: %v", resp.err) + log.Printf("[DEBUG] Sending each request in batch separately") + for _, sub := range batch.subscribers { + log.Printf("[DEBUG] Retrying single request %q", sub.singleRequest.DebugId) + singleResp := sub.singleRequest.send() + log.Printf("[DEBUG] Retried single request %q returned response: %v", sub.singleRequest.DebugId, singleResp) + + if singleResp.IsError() { + singleResp.err = errwrap.Wrapf( + "batch request and retry as single request failed - final error: {{err}}", + singleResp.err) + } + sub.respCh <- singleResp + close(sub.respCh) + } + } else { + // Send result to all subscribers + for _, sub := range batch.subscribers { + sub.respCh <- resp + close(sub.respCh) + } + } +} + // popBatch safely gets and removes a batch with given batchkey from the // RequestBatcher's started batches. func (b *RequestBatcher) popBatch(batchKey string) *startedBatch { @@ -246,7 +301,11 @@ func (batch *startedBatch) addRequest(newRequest *BatchRequest) (<-chan batchRes log.Printf("[DEBUG] Added batch request %q to batch. 
New batch body: %v", newRequest.DebugId, batch.Body) respCh := make(chan batchResponse, 1) - batch.listeners = append(batch.listeners, respCh) + sub := batchSubscriber{ + singleRequest: newRequest, + respCh: respCh, + } + batch.subscribers = append(batch.subscribers, sub) return respCh, nil } diff --git a/google/batcher_test.go b/google/batcher_test.go index 1078a63b48f..52902ab05bb 100644 --- a/google/batcher_test.go +++ b/google/batcher_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log" "strings" "sync" "testing" @@ -139,41 +140,63 @@ func TestRequestBatcher_errInSend(t *testing.T) { enableBatching: true, }) - testResource := "resource for send error" - sendErrTmpl := "this is an expected error in send batch for resource %q" + // combineF keeps track of the batched indexes + testCombine := func(body interface{}, toAdd interface{}) (interface{}, error) { + return append(body.([]int), toAdd.([]int)...), nil + } - // combineF is no-op - testCombine := func(_ interface{}, _ interface{}) (interface{}, error) { + failIdx := 0 + testResource := "RESOURCE-SEND-ERROR" + expectedErrMsg := fmt.Sprintf("Error - batch %q contains idx %d", testResource, failIdx) + + testSendBatch := func(resourceName string, body interface{}) (interface{}, error) { + log.Printf("[DEBUG] sendBatch body: %+v", body) + for _, v := range body.([]int) { + if v == failIdx { + return nil, fmt.Errorf(expectedErrMsg) + } + } return nil, nil } - testSendBatch := func(resourceName string, cnt interface{}) (interface{}, error) { - return cnt, fmt.Errorf(sendErrTmpl, resourceName) - } + numRequests := 3 wg := sync.WaitGroup{} - wg.Add(2) + wg.Add(numRequests) - for i := 0; i < 2; i++ { + for i := 0; i < numRequests; i++ { go func(idx int) { defer wg.Done() req := &BatchRequest{ DebugId: fmt.Sprintf("sendError %d", idx), ResourceName: testResource, - Body: nil, + Body: []int{idx}, CombineF: testCombine, SendF: testSendBatch, } _, err := testBatcher.SendRequestWithTimeout("batchSendError", 
req, time.Duration(10)*time.Second) - if err == nil { - t.Errorf("expected error, got none") - return - } - expectedErr := fmt.Sprintf(sendErrTmpl, testResource) - if !strings.Contains(err.Error(), fmt.Sprintf(sendErrTmpl, testResource)) { - t.Errorf("expected error %q, got error: %v", expectedErr, err) + // Requests without index 0 should have succeeded + if idx == failIdx { + // We expect an error + if err == nil { + t.Errorf("expected error for request %d, got none", idx) + } + // Check error message + expectedErrPrefix := "batch request and retry as single request failed - final error: " + if !strings.Contains(err.Error(), expectedErrPrefix) { + t.Errorf("expected error %q to contain %q", err, expectedErrPrefix) + } + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Errorf("expected error %q to contain %q", err, expectedErrMsg) + } + } else { + + // We shouldn't get error for non-failure index + if err != nil { + t.Errorf("expected request %d to succeed, got error: %v", idx, err) + } } }(i) } diff --git a/google/iam_batching.go b/google/iam_batching.go index 31cfb3b7fe0..e5ecbfe5525 100644 --- a/google/iam_batching.go +++ b/google/iam_batching.go @@ -42,7 +42,7 @@ func combineBatchIamPolicyModifiers(currV interface{}, toAddV interface{}) (inte return append(currModifiers, newModifiers...), nil } -func sendBatchModifyIamPolicy(updater ResourceIamUpdater) batcherSendFunc { +func sendBatchModifyIamPolicy(updater ResourceIamUpdater) BatcherSendFunc { return func(resourceName string, body interface{}) (interface{}, error) { modifiers, ok := body.([]iamPolicyModifyFunc) if !ok { diff --git a/google/resource_google_project_service.go b/google/resource_google_project_service.go index fe1a79ccf6b..34c94c9ae60 100644 --- a/google/resource_google_project_service.go +++ b/google/resource_google_project_service.go @@ -120,7 +120,7 @@ func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{} } srv := d.Get("service").(string) - err = 
BatchRequestEnableServices(map[string]struct{}{srv: {}}, project, d, config) + err = BatchRequestEnableService(srv, project, d, config) if err != nil { return err } diff --git a/google/serviceusage_batching.go b/google/serviceusage_batching.go index 8ec706db0a5..9f9e66c77c7 100644 --- a/google/serviceusage_batching.go +++ b/google/serviceusage_batching.go @@ -16,35 +16,18 @@ const ( // BatchRequestEnableServices can be used to batch requests to enable services // across resource nodes, i.e. to batch creation of several // google_project_service(s) resources. -func BatchRequestEnableServices(services map[string]struct{}, project string, d *schema.ResourceData, config *Config) error { - // renamed service create calls are relatively likely to fail, so break out - // of the batched call to avoid failing that as well - for k := range services { - if v, ok := renamedServicesByOldAndNewServiceNames[k]; ok { - log.Printf("[DEBUG] found renamed service %s (with alternate name %s)", k, v) - delete(services, k) - // also remove the other name, so we don't enable it 2x in a row - delete(services, v) - - // use a short timeout- failures are likely - log.Printf("[DEBUG] attempting user-specified name %s", k) - err := enableServiceUsageProjectServices([]string{k}, project, config, 1*time.Minute) - if err != nil { - log.Printf("[DEBUG] saw error %s. attempting alternate name %v", err, v) - err2 := enableServiceUsageProjectServices([]string{v}, project, config, 1*time.Minute) - if err2 != nil { - return fmt.Errorf("Saw 2 subsequent errors attempting to enable a renamed service: %s / %s", err, err2) - } - } - } +func BatchRequestEnableService(service string, project string, d *schema.ResourceData, config *Config) error { + // Renamed service create calls are relatively likely to fail, so don't try to batch the call. 
+ if altName, ok := renamedServicesByOldAndNewServiceNames[service]; ok { + return tryEnableRenamedService(service, altName, project, d, config) } req := &BatchRequest{ ResourceName: project, - Body: stringSliceFromGolangSet(services), + Body: []string{service}, CombineF: combineServiceUsageServicesBatches, SendF: sendBatchFuncEnableServices(config, d.Timeout(schema.TimeoutCreate)), - DebugId: fmt.Sprintf("Enable Project Services %s: %+v", project, services), + DebugId: fmt.Sprintf("Enable Project Service %q for project %q", service, project), } _, err := config.requestBatcherServiceUsage.SendRequestWithTimeout( @@ -54,6 +37,22 @@ func BatchRequestEnableServices(services map[string]struct{}, project string, d return err } +func tryEnableRenamedService(service, altName string, project string, d *schema.ResourceData, config *Config) error { + log.Printf("[DEBUG] found renamed service %s (with alternate name %s)", service, altName) + // use a short timeout- failures are likely + + log.Printf("[DEBUG] attempting enabling service with user-specified name %s", service) + err := enableServiceUsageProjectServices([]string{altName}, project, config, 1*time.Minute) + if err != nil { + log.Printf("[DEBUG] saw error %s. 
attempting alternate name %v", err, altName) + err2 := enableServiceUsageProjectServices([]string{altName}, project, config, 1*time.Minute) + if err2 != nil { + return fmt.Errorf("Saw 2 subsequent errors attempting to enable a renamed service: %s / %s", err, err2) + } + } + return nil +} + func BatchRequestReadServices(project string, d *schema.ResourceData, config *Config) (interface{}, error) { req := &BatchRequest{ ResourceName: project, @@ -83,7 +82,7 @@ func combineServiceUsageServicesBatches(srvsRaw interface{}, toAddRaw interface{ return append(srvs, toAdd...), nil } -func sendBatchFuncEnableServices(config *Config, timeout time.Duration) batcherSendFunc { +func sendBatchFuncEnableServices(config *Config, timeout time.Duration) BatcherSendFunc { return func(project string, toEnableRaw interface{}) (interface{}, error) { toEnable, ok := toEnableRaw.([]string) if !ok { @@ -93,7 +92,7 @@ func sendBatchFuncEnableServices(config *Config, timeout time.Duration) batcherS } } -func sendListServices(config *Config, timeout time.Duration) batcherSendFunc { +func sendListServices(config *Config, timeout time.Duration) BatcherSendFunc { return func(project string, _ interface{}) (interface{}, error) { return listCurrentlyEnabledServices(project, config, timeout) } From 460d44da4bd16ce787cfd17a0ee0cad0719eddc8 Mon Sep 17 00:00:00 2001 From: Modular Magician Date: Thu, 9 Jan 2020 20:53:03 +0000 Subject: [PATCH 19/19] Add default_if_empty to google_compute_router_nat defaults (#2946) Merged PR #2946. 
--- .changelog/2946.txt | 3 + google/batcher.go | 204 ++++++++-------------- google/batcher_test.go | 57 ++---- google/iam_batching.go | 2 +- google/resource_google_project_service.go | 2 +- google/serviceusage_batching.go | 49 +++--- 6 files changed, 118 insertions(+), 199 deletions(-) create mode 100644 .changelog/2946.txt diff --git a/.changelog/2946.txt b/.changelog/2946.txt new file mode 100644 index 00000000000..307f2798104 --- /dev/null +++ b/.changelog/2946.txt @@ -0,0 +1,3 @@ +```release-note:bug +compute: fixed `google_compute_router_nat` timeout fields causing a diff when using a long-lived resource +``` diff --git a/google/batcher.go b/google/batcher.go index 972b12c8b01..9f1a0ecdcb9 100644 --- a/google/batcher.go +++ b/google/batcher.go @@ -3,19 +3,21 @@ package google import ( "context" "fmt" - "github.com/hashicorp/errwrap" "log" "sync" "time" + + "github.com/hashicorp/errwrap" ) const defaultBatchSendIntervalSec = 3 -// RequestBatcher keeps track of batched requests globally. -// It should be created at a provider level. In general, one -// should be created per service that requires batching to: -// - prevent blocking batching for one service due to another, -// - minimize the possibility of overlap in batchKey formats (see SendRequestWithTimeout) +// RequestBatcher is a global batcher object that keeps track of +// existing batches. +// In general, a batcher should be created per service that requires batching +// in order to prevent blocking batching for one service due to another, +// and to minimize the possibility of overlap in batchKey formats +// (see SendRequestWithTimeout) type RequestBatcher struct { sync.Mutex @@ -25,41 +27,39 @@ type RequestBatcher struct { debugId string } -// These types are meant to be the public interface to batchers. They define -// batch data format and logic to send/combine batches, i.e. they require -// specific implementations per type of request. 
-type ( - // BatchRequest represents a single request to a global batcher. - BatchRequest struct { - // ResourceName represents the underlying resource for which - // a request is made. Its format is determined by what SendF expects, but - // typically should be the name of the parent GCP resource being changed. - ResourceName string - - // Body is this request's data to be passed to SendF, and may be combined - // with other bodies using CombineF. - Body interface{} - - // CombineF function determines how to combine bodies from two batches. - CombineF BatcherCombineFunc - - // SendF function determines how to actually send a batched request to a - // third party service. The arguments given to this function are - // (ResourceName, Body) where Body may have been combined with other request - // Bodies. - SendF BatcherSendFunc - - // ID for debugging request. This should be specific to a single request - // (i.e. per Terraform resource) - DebugId string - } +// BatchRequest represents a single request to a global batcher. +type BatchRequest struct { + // ResourceName represents the underlying resource for which + // a request is made. Its format is determined by what SendF expects, but + // typically should be the name of the parent GCP resource being changed. + ResourceName string + + // Body is this request's data to be passed to SendF, and may be combined + // with other bodies using CombineF. + Body interface{} + + // CombineF function determines how to combine bodies from two batches. + CombineF batcherCombineFunc + + // SendF function determines how to actually send a batched request to a + // third party service. The arguments given to this function are + // (ResourceName, Body) where Body may have been combined with other request + // Bodies. + SendF batcherSendFunc + + // ID for debugging request. This should be specific to a single request + // (i.e. 
per Terraform resource) + DebugId string +} - // BatcherCombineFunc is a function type for combine existing batches and additional batch data - BatcherCombineFunc func(body interface{}, toAdd interface{}) (interface{}, error) +// These types are meant to be the public interface to batchers. They define +// logic to manage batch data type and behavior, and require service-specific +// implementations per type of request per service. +// Function type for combine existing batches and additional batch data +type batcherCombineFunc func(body interface{}, toAdd interface{}) (interface{}, error) - // BatcherSendFunc is a function type for sending a batch request - BatcherSendFunc func(resourceName string, body interface{}) (interface{}, error) -) +// Function type for sending a batch request +type batcherSendFunc func(resourceName string, body interface{}) (interface{}, error) // batchResponse bundles an API response (data, error) tuple. type batchResponse struct { @@ -67,32 +67,16 @@ type batchResponse struct { err error } -func (br *batchResponse) IsError() bool { - return br.err != nil -} - -// startedBatch refers to a registered batch to group batch requests coming in. -// The timer manages the time after which a given batch is sent. +// startedBatch refers to a processed batch whose timer to send the request has +// already been started. The responses for the request is sent to each listener +// channel, representing parallel callers that are waiting on requests +// combined into this batch. type startedBatch struct { batchKey string - - // Combined Batch Request *BatchRequest - // subscribers is a registry of the requests (batchSubscriber) combined into this batcher. - - subscribers []batchSubscriber - - timer *time.Timer -} - -// batchSubscriber contains information required for a single request for a startedBatch. 
-type batchSubscriber struct { - // singleRequest is the original request this subscriber represents - singleRequest *BatchRequest - - // respCh is the channel created to communicate the result to a waiting goroutine.s - respCh chan batchResponse + listeners []chan batchResponse + timer *time.Timer } // batchingConfig contains user configuration for controlling batch requests. @@ -110,12 +94,8 @@ func NewRequestBatcher(debugId string, ctx context.Context, config *batchingConf batches: make(map[string]*startedBatch), } - // Start goroutine to managing stopping the batcher if the provider-level parent context is closed. go func(b *RequestBatcher) { - // Block until parent context is closed - <-b.parentCtx.Done() - - log.Printf("[DEBUG] parent context canceled, cleaning up batcher batches") + <-ctx.Done() b.stop() }(batcher) @@ -128,19 +108,19 @@ func (b *RequestBatcher) stop() { log.Printf("[DEBUG] Stopping batcher %q", b.debugId) for batchKey, batch := range b.batches { - log.Printf("[DEBUG] Cancelling started batch for batchKey %q", batchKey) + log.Printf("[DEBUG] Cleaning up batch request %q", batchKey) batch.timer.Stop() - for _, l := range batch.subscribers { - close(l.respCh) + for _, l := range batch.listeners { + close(l) } } } -// SendRequestWithTimeout is a blocking call for making a single request, run alone or as part of a batch. -// It manages registering the single request with the batcher and waiting on the result. +// SendRequestWithTimeout is expected to be called per parallel call. +// It manages waiting on the result of a batch request. // -// Params: -// batchKey: A string to group batchable requests. It should be unique to the API request being sent, similar to +// Batch requests are grouped by the given batchKey. 
batchKey +// should be unique to the API request being sent, most likely similar to // the HTTP request URL with GCP resource ID included in the URL (the caller // may choose to use a key with method if needed to diff GET/read and // POST/create) @@ -177,10 +157,7 @@ func (b *RequestBatcher) SendRequestWithTimeout(batchKey string, request *BatchR case resp := <-respCh: if resp.err != nil { // use wrapf so we can potentially extract the original error type - errMsg := fmt.Sprintf( - "Batch %q for request %q returned error: {{err}}. To debug individual requests, try disabling batching: https://www.terraform.io/docs/providers/google/guides/provider_reference.html#enable_batching", - batchKey, request.DebugId) - return nil, errwrap.Wrapf(errMsg, resp.err) + return nil, errwrap.Wrapf(fmt.Sprintf("Batch %q for request %q returned error: {{err}}", batchKey, request.DebugId), resp.err) } return resp.body, nil case <-ctx.Done(): @@ -202,75 +179,40 @@ func (b *RequestBatcher) registerBatchRequest(batchKey string, newRequest *Batch return batch.addRequest(newRequest) } - // Batch doesn't exist for given batch key - create a new batch. - log.Printf("[DEBUG] Creating new batch %q from request %q", newRequest.DebugId, batchKey) - // The calling goroutine will need a channel to wait on for a response. respCh := make(chan batchResponse, 1) - sub := batchSubscriber{ - singleRequest: newRequest, - respCh: respCh, - } - // Create a new batch with copy of the given batch request. + // Create a new batch. 
b.batches[batchKey] = &startedBatch{ - BatchRequest: &BatchRequest{ - ResourceName: newRequest.ResourceName, - Body: newRequest.Body, - CombineF: newRequest.CombineF, - SendF: newRequest.SendF, - DebugId: fmt.Sprintf("Combined batch for started batch %q", batchKey), - }, - batchKey: batchKey, - subscribers: []batchSubscriber{sub}, + BatchRequest: newRequest, + batchKey: batchKey, + listeners: []chan batchResponse{respCh}, } // Start a timer to send the request b.batches[batchKey].timer = time.AfterFunc(b.sendAfter, func() { batch := b.popBatch(batchKey) + + var resp batchResponse if batch == nil { - log.Printf("[ERROR] batch should have been added to saved batches - just run as single request %q", newRequest.DebugId) - respCh <- newRequest.send() - close(respCh) + log.Printf("[DEBUG] Batch not found in saved batches, running single request batch %q", batchKey) + resp = newRequest.send() } else { - b.sendBatchWithSingleRetry(batchKey, batch) + log.Printf("[DEBUG] Sending batch %q combining %d requests)", batchKey, len(batch.listeners)) + resp = batch.send() + } + + // Send message to all goroutines waiting on result. + for _, ch := range batch.listeners { + ch <- resp + close(ch) } }) return respCh, nil } -func (b *RequestBatcher) sendBatchWithSingleRetry(batchKey string, batch *startedBatch) { - log.Printf("[DEBUG] Sending batch %q combining %d requests)", batchKey, len(batch.subscribers)) - resp := batch.send() - - // If the batch failed and combines more than one request, retry each single request. 
- if resp.IsError() && len(batch.subscribers) > 1 { - log.Printf("[DEBUG] Batch failed with error: %v", resp.err) - log.Printf("[DEBUG] Sending each request in batch separately") - for _, sub := range batch.subscribers { - log.Printf("[DEBUG] Retrying single request %q", sub.singleRequest.DebugId) - singleResp := sub.singleRequest.send() - log.Printf("[DEBUG] Retried single request %q returned response: %v", sub.singleRequest.DebugId, singleResp) - - if singleResp.IsError() { - singleResp.err = errwrap.Wrapf( - "batch request and retry as single request failed - final error: {{err}}", - singleResp.err) - } - sub.respCh <- singleResp - close(sub.respCh) - } - } else { - // Send result to all subscribers - for _, sub := range batch.subscribers { - sub.respCh <- resp - close(sub.respCh) - } - } -} - // popBatch safely gets and removes a batch with given batchkey from the // RequestBatcher's started batches. func (b *RequestBatcher) popBatch(batchKey string) *startedBatch { @@ -301,11 +243,7 @@ func (batch *startedBatch) addRequest(newRequest *BatchRequest) (<-chan batchRes log.Printf("[DEBUG] Added batch request %q to batch. 
New batch body: %v", newRequest.DebugId, batch.Body) respCh := make(chan batchResponse, 1) - sub := batchSubscriber{ - singleRequest: newRequest, - respCh: respCh, - } - batch.subscribers = append(batch.subscribers, sub) + batch.listeners = append(batch.listeners, respCh) return respCh, nil } diff --git a/google/batcher_test.go b/google/batcher_test.go index 52902ab05bb..1078a63b48f 100644 --- a/google/batcher_test.go +++ b/google/batcher_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "strings" "sync" "testing" @@ -140,63 +139,41 @@ func TestRequestBatcher_errInSend(t *testing.T) { enableBatching: true, }) - // combineF keeps track of the batched indexes - testCombine := func(body interface{}, toAdd interface{}) (interface{}, error) { - return append(body.([]int), toAdd.([]int)...), nil - } - - failIdx := 0 - testResource := "RESOURCE-SEND-ERROR" - expectedErrMsg := fmt.Sprintf("Error - batch %q contains idx %d", testResource, failIdx) + testResource := "resource for send error" + sendErrTmpl := "this is an expected error in send batch for resource %q" - testSendBatch := func(resourceName string, body interface{}) (interface{}, error) { - log.Printf("[DEBUG] sendBatch body: %+v", body) - for _, v := range body.([]int) { - if v == failIdx { - return nil, fmt.Errorf(expectedErrMsg) - } - } + // combineF is no-op + testCombine := func(_ interface{}, _ interface{}) (interface{}, error) { return nil, nil } - numRequests := 3 + testSendBatch := func(resourceName string, cnt interface{}) (interface{}, error) { + return cnt, fmt.Errorf(sendErrTmpl, resourceName) + } wg := sync.WaitGroup{} - wg.Add(numRequests) + wg.Add(2) - for i := 0; i < numRequests; i++ { + for i := 0; i < 2; i++ { go func(idx int) { defer wg.Done() req := &BatchRequest{ DebugId: fmt.Sprintf("sendError %d", idx), ResourceName: testResource, - Body: []int{idx}, + Body: nil, CombineF: testCombine, SendF: testSendBatch, } _, err := testBatcher.SendRequestWithTimeout("batchSendError", 
req, time.Duration(10)*time.Second) - // Requests without index 0 should have succeeded - if idx == failIdx { - // We expect an error - if err == nil { - t.Errorf("expected error for request %d, got none", idx) - } - // Check error message - expectedErrPrefix := "batch request and retry as single request failed - final error: " - if !strings.Contains(err.Error(), expectedErrPrefix) { - t.Errorf("expected error %q to contain %q", err, expectedErrPrefix) - } - if !strings.Contains(err.Error(), expectedErrMsg) { - t.Errorf("expected error %q to contain %q", err, expectedErrMsg) - } - } else { - - // We shouldn't get error for non-failure index - if err != nil { - t.Errorf("expected request %d to succeed, got error: %v", idx, err) - } + if err == nil { + t.Errorf("expected error, got none") + return + } + expectedErr := fmt.Sprintf(sendErrTmpl, testResource) + if !strings.Contains(err.Error(), fmt.Sprintf(sendErrTmpl, testResource)) { + t.Errorf("expected error %q, got error: %v", expectedErr, err) } }(i) } diff --git a/google/iam_batching.go b/google/iam_batching.go index e5ecbfe5525..31cfb3b7fe0 100644 --- a/google/iam_batching.go +++ b/google/iam_batching.go @@ -42,7 +42,7 @@ func combineBatchIamPolicyModifiers(currV interface{}, toAddV interface{}) (inte return append(currModifiers, newModifiers...), nil } -func sendBatchModifyIamPolicy(updater ResourceIamUpdater) BatcherSendFunc { +func sendBatchModifyIamPolicy(updater ResourceIamUpdater) batcherSendFunc { return func(resourceName string, body interface{}) (interface{}, error) { modifiers, ok := body.([]iamPolicyModifyFunc) if !ok { diff --git a/google/resource_google_project_service.go b/google/resource_google_project_service.go index 34c94c9ae60..fe1a79ccf6b 100644 --- a/google/resource_google_project_service.go +++ b/google/resource_google_project_service.go @@ -120,7 +120,7 @@ func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{} } srv := d.Get("service").(string) - err = 
BatchRequestEnableService(srv, project, d, config) + err = BatchRequestEnableServices(map[string]struct{}{srv: {}}, project, d, config) if err != nil { return err } diff --git a/google/serviceusage_batching.go b/google/serviceusage_batching.go index 9f9e66c77c7..8ec706db0a5 100644 --- a/google/serviceusage_batching.go +++ b/google/serviceusage_batching.go @@ -16,18 +16,35 @@ const ( // BatchRequestEnableServices can be used to batch requests to enable services // across resource nodes, i.e. to batch creation of several // google_project_service(s) resources. -func BatchRequestEnableService(service string, project string, d *schema.ResourceData, config *Config) error { - // Renamed service create calls are relatively likely to fail, so don't try to batch the call. - if altName, ok := renamedServicesByOldAndNewServiceNames[service]; ok { - return tryEnableRenamedService(service, altName, project, d, config) +func BatchRequestEnableServices(services map[string]struct{}, project string, d *schema.ResourceData, config *Config) error { + // renamed service create calls are relatively likely to fail, so break out + // of the batched call to avoid failing that as well + for k := range services { + if v, ok := renamedServicesByOldAndNewServiceNames[k]; ok { + log.Printf("[DEBUG] found renamed service %s (with alternate name %s)", k, v) + delete(services, k) + // also remove the other name, so we don't enable it 2x in a row + delete(services, v) + + // use a short timeout- failures are likely + log.Printf("[DEBUG] attempting user-specified name %s", k) + err := enableServiceUsageProjectServices([]string{k}, project, config, 1*time.Minute) + if err != nil { + log.Printf("[DEBUG] saw error %s. 
attempting alternate name %v", err, v) + err2 := enableServiceUsageProjectServices([]string{v}, project, config, 1*time.Minute) + if err2 != nil { + return fmt.Errorf("Saw 2 subsequent errors attempting to enable a renamed service: %s / %s", err, err2) + } + } + } } req := &BatchRequest{ ResourceName: project, - Body: []string{service}, + Body: stringSliceFromGolangSet(services), CombineF: combineServiceUsageServicesBatches, SendF: sendBatchFuncEnableServices(config, d.Timeout(schema.TimeoutCreate)), - DebugId: fmt.Sprintf("Enable Project Service %q for project %q", service, project), + DebugId: fmt.Sprintf("Enable Project Services %s: %+v", project, services), } _, err := config.requestBatcherServiceUsage.SendRequestWithTimeout( @@ -37,22 +54,6 @@ func BatchRequestEnableService(service string, project string, d *schema.Resourc return err } -func tryEnableRenamedService(service, altName string, project string, d *schema.ResourceData, config *Config) error { - log.Printf("[DEBUG] found renamed service %s (with alternate name %s)", service, altName) - // use a short timeout- failures are likely - - log.Printf("[DEBUG] attempting enabling service with user-specified name %s", service) - err := enableServiceUsageProjectServices([]string{altName}, project, config, 1*time.Minute) - if err != nil { - log.Printf("[DEBUG] saw error %s. 
attempting alternate name %v", err, altName) - err2 := enableServiceUsageProjectServices([]string{altName}, project, config, 1*time.Minute) - if err2 != nil { - return fmt.Errorf("Saw 2 subsequent errors attempting to enable a renamed service: %s / %s", err, err2) - } - } - return nil -} - func BatchRequestReadServices(project string, d *schema.ResourceData, config *Config) (interface{}, error) { req := &BatchRequest{ ResourceName: project, @@ -82,7 +83,7 @@ func combineServiceUsageServicesBatches(srvsRaw interface{}, toAddRaw interface{ return append(srvs, toAdd...), nil } -func sendBatchFuncEnableServices(config *Config, timeout time.Duration) BatcherSendFunc { +func sendBatchFuncEnableServices(config *Config, timeout time.Duration) batcherSendFunc { return func(project string, toEnableRaw interface{}) (interface{}, error) { toEnable, ok := toEnableRaw.([]string) if !ok { @@ -92,7 +93,7 @@ func sendBatchFuncEnableServices(config *Config, timeout time.Duration) BatcherS } } -func sendListServices(config *Config, timeout time.Duration) BatcherSendFunc { +func sendListServices(config *Config, timeout time.Duration) batcherSendFunc { return func(project string, _ interface{}) (interface{}, error) { return listCurrentlyEnabledServices(project, config, timeout) }