diff --git a/.github/workflows/scheduled-pr-reminders.yml b/.github/workflows/scheduled-pr-reminders.yml index 78a082476be1..3652bfb0b217 100644 --- a/.github/workflows/scheduled-pr-reminders.yml +++ b/.github/workflows/scheduled-pr-reminders.yml @@ -8,6 +8,7 @@ on: jobs: send-pr-reminders: + if: github.repository == 'GoogleCloudPlatform/magic-modules' runs-on: ubuntu-latest permissions: pull-requests: write diff --git a/.github/workflows/unit-test-go-changelog.yml b/.github/workflows/unit-test-go-changelog.yml deleted file mode 100644 index 146ca9f96d33..000000000000 --- a/.github/workflows/unit-test-go-changelog.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: unit-tests-go-changelog - -permissions: read-all - -on: - pull_request: - paths: - - 'tools/go-changelog/**' - -jobs: - test: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 - - - name: Set up Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: '^1.21.0' - - - name: Build - run: | - cd tools/go-changelog/cmd/changelog-pr-body-check - go build - - - name: Test - run: | - cd tools/go-changelog - go test -v ./... \ No newline at end of file diff --git a/.github/workflows/membership-checker.yml b/.github/workflows/unit-test-magician.yml similarity index 85% rename from .github/workflows/membership-checker.yml rename to .github/workflows/unit-test-magician.yml index 3a873917bce9..432049505f49 100644 --- a/.github/workflows/membership-checker.yml +++ b/.github/workflows/unit-test-magician.yml @@ -1,4 +1,4 @@ -name: membership-checker +name: unit-test-magician permissions: read-all @@ -16,7 +16,7 @@ jobs: uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: '^1.21' - - name: Run membership checker unit tests + - name: Run magician unit tests run: | cd .ci/magician go test ./... 
-v diff --git a/.github/workflows/unit-test-tools.yml b/.github/workflows/unit-test-tools.yml new file mode 100644 index 000000000000..5710ce054295 --- /dev/null +++ b/.github/workflows/unit-test-tools.yml @@ -0,0 +1,106 @@ +name: unit-test-tools + +permissions: read-all + +on: + pull_request: + paths: + - 'tools/**' + - '.github/workflows/unit-test-tools.yml' + +jobs: + diff-processor: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 + + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: '^1.21.0' + + - name: Build diff-processor with TPG + run: | + cd tools/diff-processor + make clone OWNER_REPO=hashicorp/terraform-provider-google DEPTH=1 + make build OLD_REF=main NEW_REF=main + + - name: Test diff-processor with TPG + run: | + cd tools/diff-processor + go test -v ./... + env: + SERVICES_DIR: tools/diff-processor/new/google/services + + - name: Build diff-processor with TPGB + run: | + cd tools/diff-processor + make clone OWNER_REPO=hashicorp/terraform-provider-google-beta DEPTH=1 + make build OLD_REF=main NEW_REF=main + + - name: Test diff-processor with TPGB + run: | + cd tools/diff-processor + go test -v ./... + env: + SERVICES_DIR: tools/diff-processor/new/google/services + + go-changelog: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 + + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: '^1.21.0' + + - name: Build go-changelog + run: | + cd tools/go-changelog/cmd/changelog-pr-body-check + go build + + - name: Test go-changelog + run: | + cd tools/go-changelog + go test -v ./... 
+ + issue-labeler: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 + + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: '^1.21.0' + + - name: Build issue-labeler + run: | + cd tools/issue-labeler + go build + + - name: Test issue-labeler + run: | + cd tools/issue-labeler + go test -v ./... + + template-check: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 + + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: '^1.21.0' + + - name: Build template-check + run: | + cd tools/template-check + go build + + - name: Test template-check + run: | + cd tools/template-check + go test -v ./... \ No newline at end of file diff --git a/.github/workflows/unit-tests-diff-processor.yml b/.github/workflows/unit-tests-diff-processor.yml deleted file mode 100644 index 29b03e678a16..000000000000 --- a/.github/workflows/unit-tests-diff-processor.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: unit-tests-diff-processor - -permissions: read-all - -on: - pull_request: - paths: - - 'tools/diff-processor/**' - - '.github/workflows/unit-tests-diff-processor.yml' - -jobs: - test: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 - - - name: Set up Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: '^1.21.0' - - - name: Build with TPG - run: | - cd tools/diff-processor - make clone OWNER_REPO=hashicorp/terraform-provider-google DEPTH=1 - make build OLD_REF=main NEW_REF=main - - - name: Test with TPG - run: | - cd tools/diff-processor - go test -v ./... 
- env: - SERVICES_DIR: tools/diff-processor/new/google/services - - - name: Build with TPGB - run: | - cd tools/diff-processor - make clone OWNER_REPO=hashicorp/terraform-provider-google-beta DEPTH=1 - make build OLD_REF=main NEW_REF=main - - - name: Test with TPGB - run: | - cd tools/diff-processor - go test -v ./... - env: - SERVICES_DIR: tools/diff-processor/new/google/services diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index d4f313475103..200ac61605a3 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1072,8 +1072,8 @@ func (r Resource) ExtractIdentifiers(url string) []string { return result } -func (r Resource) ImportIdFormatsFromIam() string { - var importFormat, transformed []string +func (r Resource) RawImportIdFormatsFromIam() []string { + var importFormat []string if r.IamPolicy != nil { importFormat = r.IamPolicy.ImportFormat @@ -1082,7 +1082,13 @@ func (r Resource) ImportIdFormatsFromIam() string { importFormat = r.ImportFormat } - importIdFormats := ImportIdFormats(importFormat, r.Identity, r.BaseUrl) + return ImportIdFormats(importFormat, r.Identity, r.BaseUrl) +} + +func (r Resource) ImportIdRegexesFromIam() string { + var transformed []string + + importIdFormats := r.RawImportIdFormatsFromIam() for _, s := range importIdFormats { s = google.Format2Regex(s) s = strings.ReplaceAll(s, "", fmt.Sprintf("<%s>", r.IamParentResourceName())) @@ -1092,6 +1098,47 @@ func (r Resource) ImportIdFormatsFromIam() string { return strings.Join(transformed[:], "\", \"") } +func (r Resource) ImportIdFormatsFromIam() []string { + importIdFormats := r.RawImportIdFormatsFromIam() + var transformed []string + for _, s := range importIdFormats { + transformed = append(transformed, strings.ReplaceAll(s, "%", "")) + } + return transformed +} + +func (r Resource) FirstIamImportIdFormat() string { + importIdFormats := r.ImportIdFormatsFromIam() + if len(importIdFormats) == 0 { + return "" + } + first := importIdFormats[0] + first = 
strings.ReplaceAll(first, "{{name}}", fmt.Sprintf("{{%s}}", google.Underscore(r.Name))) + return first +} + +func (r Resource) IamTerraformName() string { + return fmt.Sprintf("%s_iam", r.TerraformName()) +} + +func (r Resource) IamSelfLinkProperties() []*Type { + var selfLink string + if r.IamPolicy != nil { + selfLink = r.IamPolicy.SelfLink + } + if selfLink == "" { + selfLink = r.SelfLinkUrl() + } + + params := r.ExtractIdentifiers(selfLink) + + urlProperties := google.Select(r.AllUserProperties(), func(p *Type) bool { + return slices.Contains(params, p.Name) + }) + + return urlProperties +} + func OrderProperties(props []*Type) []*Type { req := google.Select(props, func(p *Type) bool { return p.Required @@ -1113,6 +1160,20 @@ func CompareByName(a, b *Type) int { return strings.Compare(a.Name, b.Name) } +func (r Resource) GetPropertyUpdateMasksGroups() map[string][]string { + maskGroups := map[string][]string{} + for _, prop := range r.AllUserProperties() { + if (prop.FlattenObject) { + prop.GetNestedPropertyUpdateMasksGroups(maskGroups, prop.ApiName) + }else if (len(prop.UpdateMaskFields) > 0){ + maskGroups[google.Underscore(prop.Name)] = prop.UpdateMaskFields + }else{ + maskGroups[google.Underscore(prop.Name)] = []string{prop.ApiName} + } + } + return maskGroups +} + func (r Resource) CustomTemplate(templatePath string) string { return resource.ExecuteTemplate(&r, templatePath) } diff --git a/mmv1/api/type.go b/mmv1/api/type.go index ef3050903e15..a9b4f1a2174d 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -21,6 +21,7 @@ import ( "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/resource" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" + "golang.org/x/exp/slices" ) // Represents a property type @@ -411,13 +412,17 @@ func (t Type) TerraformLineage() string { return fmt.Sprintf("%s.0.%s", t.ParentMetadata.TerraformLineage(), google.Underscore(t.Name)) } -func (t Type) 
EnumValuesToString(quoteSeperator string) string { +func (t Type) EnumValuesToString(quoteSeperator string, addEmpty bool) string { var values []string for _, val := range t.EnumValues { values = append(values, fmt.Sprintf("%s%s%s", quoteSeperator, val, quoteSeperator)) } + if addEmpty && !slices.Contains(values, "\"\"") && !t.Required { + values = append(values, "\"\"") + } + return strings.Join(values, ", ") } @@ -687,6 +692,10 @@ func (t Type) Deprecated() bool { return t.DeprecationMessage != "" } +func (t *Type) GetDescription() string { + return strings.TrimRight(t.Description, "\n") +} + // // private // // A constant value to be provided as field @@ -1283,6 +1292,20 @@ func (t Type) NamespaceProperty() string { // // end +// new utility function for recursive calls to GetPropertyUpdateMasksGroups + +func (t Type) GetNestedPropertyUpdateMasksGroups(maskGroups map[string][]string, maskPrefix string) { + for _, prop := range t.AllProperties() { + if (prop.FlattenObject) { + prop.GetNestedPropertyUpdateMasksGroups(maskGroups, prop.ApiName) + }else if (len(prop.UpdateMaskFields) > 0){ + maskGroups[google.Underscore(prop.Name)] = prop.UpdateMaskFields + }else{ + maskGroups[google.Underscore(prop.Name)] = []string{maskPrefix + prop.ApiName} + } + } +} + func (t Type) CustomTemplate(templatePath string) string { return resource.ExecuteTemplate(&t, templatePath) } diff --git a/mmv1/products/compute/InterconnectAttachment.yaml b/mmv1/products/compute/InterconnectAttachment.yaml index 0302f1cec30c..ba090e031de9 100644 --- a/mmv1/products/compute/InterconnectAttachment.yaml +++ b/mmv1/products/compute/InterconnectAttachment.yaml @@ -10,7 +10,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- --- !ruby/object:Api::Resource name: 'InterconnectAttachment' kind: 'compute#interconnectAttachment' @@ -47,6 +46,15 @@ examples: interconnect_attachment_name: 'on-prem-attachment' router_name: 'router-1' network_name: 'network-1' + - !ruby/object:Provider::Terraform::Examples + name: 'interconnect_attachment_dedicated' + skip_docs: true + primary_resource_id: 'on_prem' + vars: + interconnect_name: 'interconenct-1' + interconnect_attachment_name: 'on-prem-attachment' + router_name: 'router-1' + network_name: 'network-1' - !ruby/object:Provider::Terraform::Examples name: 'compute_interconnect_attachment_ipsec_encryption' primary_resource_id: 'ipsec-encrypted-interconnect-attachment' @@ -254,17 +262,14 @@ properties: URL of addresses that have been reserved for the interconnect attachment, Used only for interconnect attachment that has the encryption option as IPSEC. - The addresses must be RFC 1918 IP address ranges. When creating HA VPN gateway over the interconnect attachment, if the attachment is configured to use an RFC 1918 IP address, then the VPN gateway's IP address will be allocated from the IP address range specified here. - For example, if the HA VPN gateway's interface 0 is paired to this interconnect attachment, then an RFC 1918 IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this interconnect attachment. - If this field is not specified for interconnect attachment that has encryption option as IPSEC, later on when creating HA VPN gateway on this interconnect attachment, the HA VPN gateway's IP address will be @@ -284,11 +289,9 @@ properties: Indicates the user-supplied encryption option of this interconnect attachment. Can only be specified at attachment creation for PARTNER or DEDICATED attachments. - * NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. 
- * IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, @@ -305,7 +308,6 @@ properties: description: | The stack type for this interconnect attachment to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. - This field can be both set at interconnect attachments creation and update interconnect attachment operations. values: @@ -324,3 +326,14 @@ properties: IPv6 address + prefix length to be configured on the customer router subinterface for this interconnect attachment. output: true + - !ruby/object:Api::Type::Integer + name: 'subnetLength' + description: | + Length of the IPv4 subnet mask. Allowed values: 29 (default), 30. The default value is 29, + except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a + constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure + remote location fall into this category. In these cases, the default value is 30, and + requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it + gives Google Cloud Support more debugging visibility. + immutable: true + ignore_read: true diff --git a/mmv1/products/compute/RegionSslPolicy.yaml b/mmv1/products/compute/RegionSslPolicy.yaml index eeb9a3175129..c0b79b0e60c9 100644 --- a/mmv1/products/compute/RegionSslPolicy.yaml +++ b/mmv1/products/compute/RegionSslPolicy.yaml @@ -53,10 +53,11 @@ parameters: name: 'region' resource: 'Region' imports: 'name' - required: true + default_from_api: true immutable: true description: | The region where the regional SSL policy resides. 
+ custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: - !ruby/object:Api::Type::Time diff --git a/mmv1/products/datafusion/go_instance.yaml b/mmv1/products/datafusion/go_instance.yaml index 84691be931a3..33cfefff7faf 100644 --- a/mmv1/products/datafusion/go_instance.yaml +++ b/mmv1/products/datafusion/go_instance.yaml @@ -115,16 +115,17 @@ properties: immutable: true - name: 'type' type: Enum - description: "Represents the type of Data Fusion instance. Each type is configured with -the default settings for processing and memory. -- BASIC: Basic Data Fusion instance. In Basic type, the user will be able to create data pipelines -using point and click UI. However, there are certain limitations, such as fewer number -of concurrent pipelines, no support for streaming pipelines, etc. -- ENTERPRISE: Enterprise Data Fusion instance. In Enterprise type, the user will have more features -available, such as support for streaming pipelines, higher number of concurrent pipelines, etc. -- DEVELOPER: Developer Data Fusion instance. In Developer type, the user will have all features available but -with restrictive capabilities. This is to help enterprises design and develop their data ingestion and integration -pipelines at low cost." + description: | + Represents the type of Data Fusion instance. Each type is configured with + the default settings for processing and memory. + - BASIC: Basic Data Fusion instance. In Basic type, the user will be able to create data pipelines + using point and click UI. However, there are certain limitations, such as fewer number + of concurrent pipelines, no support for streaming pipelines, etc. + - ENTERPRISE: Enterprise Data Fusion instance. In Enterprise type, the user will have more features + available, such as support for streaming pipelines, higher number of concurrent pipelines, etc. 
+ - DEVELOPER: Developer Data Fusion instance. In Developer type, the user will have all features available but + with restrictive capabilities. This is to help enterprises design and develop their data ingestion and integration + pipelines at low cost. required: true immutable: true enum_values: @@ -142,9 +143,9 @@ pipelines at low cost." description: "Option to enable granular role-based access control." - name: 'labels' type: KeyValueLabels - description: "The resource labels for instance to use to annotate any related underlying resources, -such as Compute Engine VMs. -" + description: | + The resource labels for instance to use to annotate any related underlying resources, + such as Compute Engine VMs. immutable: false - name: 'options' type: KeyValuePairs @@ -154,21 +155,24 @@ such as Compute Engine VMs. diff_suppress_func: 'instanceOptionsDiffSuppress' - name: 'createTime' type: String - description: "The time the instance was created in RFC3339 UTC 'Zulu' format, accurate to nanoseconds." + description: | + The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds. output: true - name: 'updateTime' type: String - description: "The time the instance was last updated in RFC3339 UTC 'Zulu' format, accurate to nanoseconds." + description: | + The time the instance was last updated in RFC3339 UTC "Zulu" format, accurate to nanoseconds. output: true - name: 'state' type: Enum - description: "The current state of this Data Fusion instance. -- CREATING: Instance is being created -- RUNNING: Instance is running and ready for requests -- FAILED: Instance creation failed -- DELETING: Instance is being deleted -- UPGRADING: Instance is being upgraded -- RESTARTING: Instance is being restarted" + description: | + The current state of this Data Fusion instance. 
+ - CREATING: Instance is being created + - RUNNING: Instance is running and ready for requests + - FAILED: Instance creation failed + - DELETING: Instance is being deleted + - UPGRADING: Instance is being upgraded + - RESTARTING: Instance is being restarted output: true enum_values: - 'CREATING' @@ -197,9 +201,10 @@ such as Compute Engine VMs. deprecation_message: '`service_account` is deprecated and will be removed in a future major release. Instead, use `tenant_project_id` to extract the tenant project ID.' - name: 'privateInstance' type: Boolean - description: "Specifies whether the Data Fusion instance should be private. If set to -true, all Data Fusion nodes will have private IP addresses and will not be -able to access the public internet." + description: | + Specifies whether the Data Fusion instance should be private. If set to + true, all Data Fusion nodes will have private IP addresses and will not be + able to access the public internet. immutable: true - name: 'dataprocServiceAccount' type: String @@ -220,15 +225,17 @@ able to access the public internet." properties: - name: 'ipAllocation' type: String - description: "The IP range in CIDR notation to use for the managed Data Fusion instance - nodes. This range must not overlap with any other ranges used in the Data Fusion instance network." + description: | + The IP range in CIDR notation to use for the managed Data Fusion instance + nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. required: true immutable: true - name: 'network' type: String - description: "Name of the network in the project with which the tenant project - will be peered for executing pipelines. 
In case of shared VPC where the network resides in another host - project the network should specified in the form of projects/{host-project-id}/global/networks/{network}" + description: | + Name of the network in the project with which the tenant project + will be peered for executing pipelines. In case of shared VPC where the network resides in another host + project the network should specified in the form of projects/{host-project-id}/global/networks/{network} required: true immutable: true - name: 'zone' @@ -275,8 +282,9 @@ able to access the public internet." type: Array description: | List of accelerators enabled for this CDF instance. - If accelerators are enabled it is possible a permadiff will be created with the Options field. - Users will need to either manually update their state file to include these diffed options, or include the field in a [lifecycle ignore changes block](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes). + + If accelerators are enabled it is possible a permadiff will be created with the Options field. + Users will need to either manually update their state file to include these diffed options, or include the field in a [lifecycle ignore changes block](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes). item_type: properties: - name: 'acceleratorType' @@ -289,7 +297,8 @@ able to access the public internet." - 'CCAI_INSIGHTS' - name: 'state' type: Enum - description: "The type of an accelator for a CDF instance." + description: | + The type of an accelator for a CDF instance. required: true enum_values: - 'ENABLED' diff --git a/mmv1/products/dataplex/EntryGroup.yaml b/mmv1/products/dataplex/EntryGroup.yaml new file mode 100644 index 000000000000..6d5ef7c445bc --- /dev/null +++ b/mmv1/products/dataplex/EntryGroup.yaml @@ -0,0 +1,133 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'EntryGroup' +base_url: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}' +self_link: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/entryGroups?entryGroupId={{entry_group_id}}' +update_url: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}' +delete_url: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}' +update_verb: :PATCH +update_mask: true +description: | + An Entry Group represents a logical grouping of one or more Entries. 
+import_format: ['projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}'] +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{{op_id}}' + wait_ms: 1000 + timeouts: !ruby/object:Api::Timeouts + insert_minutes: 5 + update_minutes: 5 + delete_minutes: 5 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' +autogen_async: true +iam_policy: !ruby/object:Api::Resource::IamPolicy + skip_import_test: true + method_name_separator: ':' + fetch_iam_policy_verb: :GET + parent_resource_attribute: 'entry_group_id' + import_format: + [ + 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}', + '{{entry_group_id}}', + ] +parameters: + - !ruby/object:Api::Type::String + name: 'location' + url_param_only: true + immutable: true + description: | + The location where entry group will be created in. + - !ruby/object:Api::Type::String + name: 'entryGroupId' + url_param_only: true + immutable: true + description: | + The entry group id of the entry group. +properties: + - !ruby/object:Api::Type::String + name: name + description: | + The relative resource name of the EntryGroup, of the form: projects/{project_number}/locations/{location_id}/entryGroups/{entry_group_id} + output: true + - !ruby/object:Api::Type::String + name: 'uid' + output: true + description: | + System generated globally unique ID for the EntryGroup. This ID will be different if the EntryGroup is deleted and re-created with the same name. + - !ruby/object:Api::Type::Time + name: 'createTime' + output: true + description: | + The time when the EntryGroup was created. + - !ruby/object:Api::Type::Time + name: 'updateTime' + output: true + description: | + The time when the EntryGroup was last updated. 
+ - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the EntryGroup. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + User friendly display name. + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: | + User-defined labels for the EntryGroup. + - !ruby/object:Api::Type::Enum + name: 'transferStatus' + output: true + description: | + Denotes the transfer status of the Entry Group. It is unspecified + for Entry Group created from Dataplex API. + values: + - :TRANSFER_STATUS_UNSPECIFIED + - :TRANSFER_STATUS_MIGRATED + - :TRANSFER_STATUS_TRANSFERRED +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'dataplex_entry_group_basic' + primary_resource_id: 'test_entry_group_basic' + primary_resource_name: "fmt.Sprintf(\"tf-test-entry-group%s\", + context[\"random_suffix\"\ + ])" + test_env_vars: + project_name: :PROJECT_NAME + vars: + entry_group_name: entry-group-basic + - !ruby/object:Provider::Terraform::Examples + name: 'dataplex_entry_group_full' + primary_resource_id: 'test_entry_group_full' + primary_resource_name: "fmt.Sprintf(\"tf-test-entry-group%s\", + context[\"random_suffix\"\ + ])" + test_env_vars: + project_name: :PROJECT_NAME + vars: + entry_group_name: entry-group-full diff --git a/mmv1/products/kms/AutokeyConfig.yaml b/mmv1/products/kms/AutokeyConfig.yaml new file mode 100644 index 000000000000..845f3bf0521a --- /dev/null +++ b/mmv1/products/kms/AutokeyConfig.yaml @@ -0,0 +1,74 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'AutokeyConfig' +base_url: 'folders/{{folder}}/autokeyConfig' +self_link: 'folders/{{folder}}/autokeyConfig' +# This is a singleton resource that is already created, so create +# is really an update, and therefore should be PATCHed. +create_url: 'folders/{{folder}}/autokeyConfig?updateMask=keyProject' +create_verb: :PATCH +update_url: 'folders/{{folder}}/autokeyConfig?updateMask=keyProject' +update_verb: :PATCH +delete_url: 'folders/{{folder}}/autokeyConfig?updateMask=keyProject' +delete_verb: :PATCH +# This resource is currently in public preview. +description: | + `AutokeyConfig` is a singleton resource used to configure the auto-provisioning + flow of CryptoKeys for CMEK. + + + ~> **Note:** AutokeyConfigs cannot be deleted from Google Cloud Platform. + Destroying a Terraform-managed AutokeyConfig will remove it from state but + *will not delete the resource from the project.* +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Cloud KMS with Autokey': 'https://cloud.google.com/kms/docs/kms-with-autokey' + api: 'https://cloud.google.com/kms/docs/reference/rest/v1/AutokeyConfig' +id_format: 'folders/{{folder}}/autokeyConfig' +import_format: ['folders/{{folder}}/autokeyConfig'] +min_version: beta +custom_code: !ruby/object:Provider::Terraform::CustomCode + test_check_destroy: templates/terraform/custom_check_destroy/kms_autokey_config.go.erb +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'kms_autokey_config_all' + # Need the time_sleep resource + external_providers: ["random", "time"] + primary_resource_id: + 'example-autokeyconfig' + min_version: beta + vars: + folder_name: + 'my-folder' + key_project_name: + 'key-proj' + test_env_vars: + org_id: :ORG_ID + billing_account: :BILLING_ACCT +parameters: + - !ruby/object:Api::Type::String + name: 'folder' + required: true + immutable: true 
+ url_param_only: true + description: | + The folder for which to retrieve config. +properties: + - !ruby/object:Api::Type::String + name: 'keyProject' + description: | + The target key project for a given folder where KMS Autokey will provision a + CryptoKey for any new KeyHandle the Developer creates. Should have the form + `projects/`. diff --git a/mmv1/products/kms/KeyHandle.yaml b/mmv1/products/kms/KeyHandle.yaml new file mode 100644 index 000000000000..645fed6ddeb6 --- /dev/null +++ b/mmv1/products/kms/KeyHandle.yaml @@ -0,0 +1,89 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'KeyHandle' +base_url: 'projects/{{project}}/locations/{{location}}/keyHandles' +create_url: 'projects/{{project}}/locations/{{location}}/keyHandles?keyHandleId={{name}}' +self_link: 'projects/{{project}}/locations/{{location}}/keyHandles/{{name}}' +immutable: true +# This resource is currently in public preview. +description: | + A `KeyHandle` is a resource used to auto-provision CryptoKeys for CMEK. + + + ~> **Note:** KeyHandles cannot be deleted from Google Cloud Platform. 
+ Destroying a Terraform-managed KeyHandle will remove it from state but + *will not delete the resource from the project.* +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Cloud KMS with Autokey': 'https://cloud.google.com/kms/docs/kms-with-autokey' + api: 'https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyHandles' +id_format: 'projects/{{project}}/locations/{{location}}/keyHandles/{{name}}' +import_format: ['projects/{{project}}/locations/{{location}}/keyHandles/{{name}}'] +min_version: beta +skip_delete: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'kms_key_handle_basic' + # Need the time_sleep resource + external_providers: ["random", "time"] + primary_resource_id: + 'example-keyhandle' + min_version: beta + vars: + key_project_name: + 'key-proj' + resource_project_name: + 'resources' + test_env_vars: + org_id: :ORG_ID + billing_account: :BILLING_ACCT +custom_code: !ruby/object:Provider::Terraform::CustomCode + decoder: templates/terraform/decoders/kms.go.erb +autogen_async: true +async: !ruby/object:Api::OpAsync + actions: ['create'] + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' + result: !ruby/object:Api::OpAsync::Result + resource_inside_response: true +parameters: + - !ruby/object:Api::Type::String + name: 'location' + description: | + The location for the KeyHandle. + A full list of valid locations can be found by running `gcloud kms locations list`. + required: true + url_param_only: true +properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name for the KeyHandle. 
+ required: true + immutable: true + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + A reference to a Cloud KMS CryptoKey that can be used for CMEK in the requested + product/project/location, for example + `projects/1/locations/us-east1/keyRings/foo/cryptoKeys/bar-ffffff` + output: true + - !ruby/object:Api::Type::String + name: 'resourceTypeSelector' + description: | + Selector of the resource type where we want to protect resources. + For example, `storage.googleapis.com/Bucket OR compute.googleapis.com/*` + required: true + immutable: true diff --git a/mmv1/products/kms/product.yaml b/mmv1/products/kms/product.yaml index 17295cea22e2..47f6a50fb0b6 100644 --- a/mmv1/products/kms/product.yaml +++ b/mmv1/products/kms/product.yaml @@ -18,5 +18,8 @@ versions: - !ruby/object:Api::Product::Version name: ga base_url: https://cloudkms.googleapis.com/v1/ + - !ruby/object:Api::Product::Version + name: beta + base_url: https://cloudkms.googleapis.com/v1/ scopes: - https://www.googleapis.com/auth/cloudkms diff --git a/mmv1/products/networkservices/LbRouteExtension.yaml b/mmv1/products/networkservices/LbRouteExtension.yaml new file mode 100644 index 000000000000..c71765e4e5ed --- /dev/null +++ b/mmv1/products/networkservices/LbRouteExtension.yaml @@ -0,0 +1,179 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'LbRouteExtension' +description: | + LbRouteExtension is a resource that lets you control where traffic is routed to for a given request. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Configure a route extension': 'https://cloud.google.com/service-extensions/docs/configure-callout#configure_a_route_extension' + api: 'https://cloud.google.com/service-extensions/docs/reference/rest/v1beta1/projects.locations.lbRouteExtensions' +base_url: 'projects/{{project}}/locations/{{location}}/lbRouteExtensions' +self_link: 'projects/{{project}}/locations/{{location}}/lbRouteExtensions/{{name}}' +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +create_url: 'projects/{{project}}/locations/{{location}}/lbRouteExtensions?lbRouteExtensionId={{name}}' +update_verb: :PATCH +update_mask: true +autogen_async: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'network_services_lb_route_extension_basic' + primary_resource_id: 'default' + vars: + ilb_network_name: 'l7-ilb-network' + proxy_subnet_name: 'l7-ilb-proxy-subnet' + backend_subnet_name: 'l7-ilb-subnet' + forwarding_rule_name: 'l7-ilb-forwarding-rule' + target_http_proxy_name: 'l7-ilb-target-http-proxy' + regional_url_map_name: 'l7-ilb-regional-url-map' + backend_service_name: 'l7-ilb-backend-subnet' + mig_template_name: 'l7-ilb-mig-template' + hc_name: 'l7-ilb-hc' + mig_name: 'l7-ilb-mig1' + fw_allow_iap_hc_name: 'l7-ilb-fw-allow-iap-hc' + fw_allow_ilb_to_backends_name: 'l7-ilb-fw-allow-ilb-to-backends' + vm_test_name: 'l7-ilb-test-vm' + lb_route_extension_name: 'l7-ilb-route-ext' + callouts_instance_name: 'l7-ilb-callouts-ins' + callouts_instance_group: 'l7-ilb-callouts-ins-group' + callouts_hc_name: 'l7-ilb-callouts-hc' + callouts_backend_name: 'l7-ilb-callouts-backend' + 
ignore_read_extra: + - 'port_range' + - 'target' +parameters: + - !ruby/object:Api::Type::String + name: 'location' + required: true + immutable: true + url_param_only: true + description: | + The location of the route extension + - !ruby/object:Api::Type::String + name: 'name' + required: true + immutable: true + url_param_only: true + description: | + Name of the LbRouteExtension resource in the following format: projects/{project}/locations/{location}/lbRouteExtensions/{lbRouteExtension} +properties: + - !ruby/object:Api::Type::String + name: 'description' + description: | + A human-readable description of the resource. + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: 'Set of labels associated with the LbRouteExtension resource.' + - !ruby/object:Api::Type::Array + name: forwardingRules + description: | + A list of references to the forwarding rules to which this service extension is attached to. + At least one forwarding rule is required. There can be only one LbRouteExtension resource per forwarding rule. + required: true + item_type: Api::Type::String + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - !ruby/object:Api::Type::Array + name: extensionChains + description: | + A set of ordered extension chains that contain the match conditions and extensions to execute. + Match conditions for each extension chain are evaluated in sequence for a given request. + The first extension chain that has a condition that matches the request is executed. + Any subsequent extension chains do not execute. Limited to 5 extension chains per resource. + required: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name for this extension chain. The name is logged as part of the HTTP request logs. + The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, + and can have a maximum length of 63 characters. 
Additionally, the first character must be a letter + and the last character must be a letter or a number. + required: true + - !ruby/object:Api::Type::NestedObject + name: 'matchCondition' + description: | + Conditions under which this chain is invoked for a request. + required: true + properties: + - !ruby/object:Api::Type::String + name: 'celExpression' + description: | + A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. + required: true + - !ruby/object:Api::Type::Array + name: 'extensions' + description: | + A set of extensions to execute for the matching request. + At least one extension is required. Up to 3 extensions can be defined for each extension chain for + LbTrafficExtension resource. LbRouteExtension chains are limited to 1 extension per extension chain. + required: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name for this extension. The name is logged as part of the HTTP request logs. + The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, + and can have a maximum length of 63 characters. Additionally, the first character must be a letter + and the last a letter or a number. + required: true + - !ruby/object:Api::Type::String + name: 'authority' + description: | + The :authority header in the gRPC request sent from Envoy to the extension service. + - !ruby/object:Api::Type::String + name: 'service' + description: | + The reference to the service that runs the extension. Must be a reference to a backend service + required: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - !ruby/object:Api::Type::String + name: 'timeout' + description: | + Specifies the timeout for each individual message on the stream. The timeout must be between 10-1000 milliseconds. 
+ A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + diff_suppress_func: 'tpgresource.DurationDiffSuppress' + - !ruby/object:Api::Type::Boolean + name: 'failOpen' + description: | + Determines how the proxy behaves if the call to the extension fails or times out. + When set to TRUE, request or response processing continues without error. + Any subsequent extensions in the extension chain are also executed. + When set to FALSE: * If response headers have not been delivered to the downstream client, + a generic 500 error is returned to the client. The error response can be tailored by + configuring a custom error response in the load balancer. + - !ruby/object:Api::Type::Array + name: 'forwardHeaders' + description: | + List of the HTTP headers to forward to the extension (from the client or backend). + If omitted, all headers are sent. Each element is a string indicating the header name. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'loadBalancingScheme' + required: true + immutable: true + description: | + All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. + For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service) and + [Supported application load balancers](https://cloud.google.com/service-extensions/docs/callouts-overview#supported-lbs). + values: + - :INTERNAL_MANAGED + - :EXTERNAL_MANAGED diff --git a/mmv1/products/networkservices/LbTrafficExtension.yaml b/mmv1/products/networkservices/LbTrafficExtension.yaml index 0c307c54f7f6..5f928941592a 100644 --- a/mmv1/products/networkservices/LbTrafficExtension.yaml +++ b/mmv1/products/networkservices/LbTrafficExtension.yaml @@ -136,7 +136,6 @@ properties: name: 'authority' description: | The :authority header in the gRPC request sent from Envoy to the extension service. 
- required: true - !ruby/object:Api::Type::String name: 'service' description: | @@ -148,7 +147,6 @@ properties: description: | Specifies the timeout for each individual message on the stream. The timeout must be between 10-1000 milliseconds. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". - required: true diff_suppress_func: 'tpgresource.DurationDiffSuppress' - !ruby/object:Api::Type::Boolean name: 'failOpen' @@ -176,10 +174,11 @@ properties: item_type: Api::Type::String - !ruby/object:Api::Type::Enum name: 'loadBalancingScheme' + immutable: true description: | All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. - For more information, refer to Choosing a load balancer. + For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service) and + [Supported application load balancers](https://cloud.google.com/service-extensions/docs/callouts-overview#supported-lbs). values: - - :LOAD_BALANCING_SCHEME_UNSPECIFIED - :INTERNAL_MANAGED - :EXTERNAL_MANAGED diff --git a/mmv1/products/pubsub/Subscription.yaml b/mmv1/products/pubsub/Subscription.yaml index 538c3212b9fa..97a17de701cd 100644 --- a/mmv1/products/pubsub/Subscription.yaml +++ b/mmv1/products/pubsub/Subscription.yaml @@ -173,6 +173,10 @@ properties: name: 'filenameSuffix' description: | User-provided suffix for Cloud Storage filename. Must not end in "/". + - !ruby/object:Api::Type::String + name: 'filenameDatetimeFormat' + description: | + User-provided format string specifying how to represent datetimes in Cloud Storage filenames. 
- !ruby/object:Api::Type::String name: 'maxDuration' description: | diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index a9a0a1f120ff..fb0222e11343 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -74,13 +74,13 @@ properties: - 'TYPE_UNSPECIFIED' - 'PROTOCOL_BUFFER' - 'AVRO' - - '' - name: 'definition' type: String - description: "The definition of the schema. -This should contain a string representing the full definition of the schema -that is a valid schema definition of the type specified in type. Changes -to the definition commit new [schema revisions](https://cloud.google.com/pubsub/docs/commit-schema-revision). -A schema can only have up to 20 revisions, so updates that fail with an -error indicating that the limit has been reached require manually -[deleting old revisions](https://cloud.google.com/pubsub/docs/delete-schema-revision)." + description: | + The definition of the schema. + This should contain a string representing the full definition of the schema + that is a valid schema definition of the type specified in type. Changes + to the definition commit new [schema revisions](https://cloud.google.com/pubsub/docs/commit-schema-revision). + A schema can only have up to 20 revisions, so updates that fail with an + error indicating that the limit has been reached require manually + [deleting old revisions](https://cloud.google.com/pubsub/docs/delete-schema-revision). 
diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 9021111b268f..a1c1fa36e5d2 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -103,9 +103,10 @@ properties: custom_expand: 'templates/terraform/custom_expand/go/shortname_to_url.go.tmpl' - name: 'topic' type: ResourceRef - description: "A reference to a Topic resource, of the form projects/{project}/topics/{{name}} -(as in the id property of a google_pubsub_topic), or just a topic name if -the topic is in the same project as the subscription." + description: | + A reference to a Topic resource, of the form projects/{project}/topics/{{name}} + (as in the id property of a google_pubsub_topic), or just a topic name if + the topic is in the same project as the subscription. required: true immutable: true diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' @@ -271,24 +272,25 @@ pull and ack messages using API methods." send_empty_value: true - name: 'ackDeadlineSeconds' type: Integer - description: "This value is the maximum time after a subscriber receives a message -before the subscriber should acknowledge the message. After message -delivery but before the ack deadline expires and before the message is -acknowledged, it is an outstanding message and will not be delivered -again during that time (on a best-effort basis). + description: | + This value is the maximum time after a subscriber receives a message + before the subscriber should acknowledge the message. After message + delivery but before the ack deadline expires and before the message is + acknowledged, it is an outstanding message and will not be delivered + again during that time (on a best-effort basis). -For pull subscriptions, this value is used as the initial value for -the ack deadline. To override this value for a given message, call -subscriptions.modifyAckDeadline with the corresponding ackId if using -pull. 
The minimum custom deadline you can specify is 10 seconds. The -maximum custom deadline you can specify is 600 seconds (10 minutes). -If this parameter is 0, a default value of 10 seconds is used. + For pull subscriptions, this value is used as the initial value for + the ack deadline. To override this value for a given message, call + subscriptions.modifyAckDeadline with the corresponding ackId if using + pull. The minimum custom deadline you can specify is 10 seconds. The + maximum custom deadline you can specify is 600 seconds (10 minutes). + If this parameter is 0, a default value of 10 seconds is used. -For push delivery, this value is also used to set the request timeout -for the call to the push endpoint. + For push delivery, this value is also used to set the request timeout + for the call to the push endpoint. -If the subscriber never acknowledges the message, the Pub/Sub system -will eventually redeliver the message." + If the subscriber never acknowledges the message, the Pub/Sub system + will eventually redeliver the message. default_from_api: true - name: 'messageRetentionDuration' type: String diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 8aa91fff0262..1de1a2ee03a4 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -118,6 +118,7 @@ func (td *TemplateData) GenerateResourceFile(filePath string, resource api.Resou "templates/terraform/custom_flatten/go/bigquery_table_ref.go.tmpl", "templates/terraform/flatten_property_method.go.tmpl", "templates/terraform/expand_property_method.go.tmpl", + "templates/terraform/update_mask.go.tmpl", } td.GenerateFile(filePath, templatePath, resource, true, templates...) } @@ -167,6 +168,14 @@ func (td *TemplateData) GenerateIamPolicyFile(filePath string, resource api.Reso td.GenerateFile(filePath, templatePath, resource, true, templates...) 
} +func (td *TemplateData) GenerateIamResourceDocumentationFile(filePath string, resource api.Resource) { + templatePath := "templates/terraform/resource_iam.html.markdown.tmpl" + templates := []string{ + templatePath, + } + td.GenerateFile(filePath, templatePath, resource, false, templates...) +} + func (td *TemplateData) GenerateIamPolicyTestFile(filePath string, resource api.Resource) { } diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 49cd4f8d4247..a58f72b3360d 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -223,9 +223,26 @@ func (t *Terraform) GenerateIamPolicy(object api.Resource, templateData Template templateData.GenerateIamPolicyTestFile(targetFilePath, object) } } - // if generateDocs { - // generate_iam_documentation(pwd, data) - // } + if generateDocs { + t.GenerateIamDocumentation(object, templateData, outputFolder, generateCode, generateDocs) + } +} + +// def generate_iam_documentation(pwd, data) +func (t *Terraform) GenerateIamDocumentation(object api.Resource, templateData TemplateData, outputFolder string, generateCode, generateDocs bool) { + resourceDocFolder := path.Join(outputFolder, "website", "docs", "r") + if err := os.MkdirAll(resourceDocFolder, os.ModePerm); err != nil { + log.Println(fmt.Errorf("error creating parent directory %v: %v", resourceDocFolder, err)) + } + targetFilePath := path.Join(resourceDocFolder, fmt.Sprintf("%s_iam.html.markdown", t.FullResourceName(object))) + templateData.GenerateIamResourceDocumentationFile(targetFilePath, object) + + datasourceDocFolder := path.Join(outputFolder, "website", "docs", "d") + if err := os.MkdirAll(datasourceDocFolder, os.ModePerm); err != nil { + log.Println(fmt.Errorf("error creating parent directory %v: %v", datasourceDocFolder, err)) + } + // targetFilePath = path.Join(datasourceDocFolder, fmt.Sprintf("%s_iam.html.markdown", t.FullResourceName(object))) + // templateData.GenerateIamDatasourceDocumentationFile(targetFilePath, 
object) } func (t *Terraform) FolderName() string { @@ -1016,28 +1033,6 @@ func languageFromFilename(filename string) string { // // end // -// def generate_resource_tests(pwd, data) -// -// return if data.object.examples -// .reject(&:skip_test) -// .reject do |e| -// @api.version_obj_or_closest(data.version) \ -// < @api.version_obj_or_closest(e.min_version) -// end -// .empty? -// -// product_name = @api.api_name -// target_folder = File.join(folder_name(data.version), 'services', product_name) -// FileUtils.mkpath folder_name(data.version) -// data.generate( -// pwd, -// 'templates/terraform/examples/base_configs/test_file.go.erb', -// "#{target_folder}/resource_#{full_resource_name(data)}_generated_test.go", -// self -// ) -// -// end -// // def generate_resource_sweepers(pwd, data) // // return if data.object.skip_sweeper || @@ -1058,57 +1053,6 @@ func languageFromFilename(filename string) string { // // end // -// # Generate the IAM policy for this object. This is used to query and test -// # IAM policies separately from the resource itself -// def generate_iam_policy(pwd, data, generate_code, generate_docs) -// -// if generate_code \ -// && (!data.object.iam_policy.min_version \ -// || data.object.iam_policy.min_version >= data.version) -// product_name = @api.api_name -// target_folder = File.join(folder_name(data.version), 'services', product_name) -// FileUtils.mkpath target_folder -// data.generate(pwd, -// 'templates/terraform/iam_policy.go.erb', -// "#{target_folder}/iam_#{full_resource_name(data)}.go", -// self) -// -// # Only generate test if testable examples exist. -// unless data.object.examples.reject(&:skip_test).empty? 
-// data.generate( -// pwd, -// 'templates/terraform/examples/base_configs/iam_test_file.go.erb', -// "#{target_folder}/iam_#{full_resource_name(data)}_generated_test.go", -// self -// ) -// end -// end -// -// return unless generate_docs -// -// generate_iam_documentation(pwd, data) -// -// end -// -// def generate_iam_documentation(pwd, data) -// -// target_folder = data.output_folder -// resource_doc_folder = File.join(target_folder, 'website', 'docs', 'r') -// datasource_doc_folder = File.join(target_folder, 'website', 'docs', 'd') -// FileUtils.mkpath resource_doc_folder -// filepath = -// File.join(resource_doc_folder, "#{full_resource_name(data)}_iam.html.markdown") -// -// data.generate(pwd, 'templates/terraform/resource_iam.html.markdown.erb', filepath, self) -// FileUtils.mkpath datasource_doc_folder -// filepath = -// File.join(datasource_doc_folder, "#{full_resource_name(data)}_iam_policy.html.markdown") -// -// data.generate(pwd, 'templates/terraform/datasource_iam.html.markdown.erb', filepath, self) -// -// end -// -// // # Returns the id format of an object, or self_link_uri if none is explicitly defined // # We prefer the long name of a resource as the id so that users can reference // # resources in a standard way, and most APIs accept short name, long name or self_link diff --git a/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb b/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb new file mode 100644 index 000000000000..275a16ddd588 --- /dev/null +++ b/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb @@ -0,0 +1,22 @@ +config := acctest.GoogleProviderConfig(t) + +url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{KMSBasePath}}folders/{{folder}}/autokeyConfig") +if err != nil { + return err +} + +res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: config.UserAgent, +}) +if err != nil { + 
return nil +} + +if v := res["key_project"]; v != nil { + return fmt.Errorf("AutokeyConfig still exists at %s", url) +} + +return nil diff --git a/mmv1/templates/terraform/examples/dataplex_entry_group_basic.tf.erb b/mmv1/templates/terraform/examples/dataplex_entry_group_basic.tf.erb new file mode 100644 index 000000000000..630a820f5d88 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataplex_entry_group_basic.tf.erb @@ -0,0 +1,5 @@ +resource "google_dataplex_entry_group" "<%= ctx[:primary_resource_id] %>" { + entry_group_id = "<%= ctx[:vars]['entry_group_name'] %>" + project = "<%= ctx[:test_env_vars]['project_name'] %>" + location = "us-central1" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dataplex_entry_group_full.tf.erb b/mmv1/templates/terraform/examples/dataplex_entry_group_full.tf.erb new file mode 100644 index 000000000000..32a1598548d7 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataplex_entry_group_full.tf.erb @@ -0,0 +1,9 @@ +resource "google_dataplex_entry_group" "<%= ctx[:primary_resource_id] %>" { + entry_group_id = "<%= ctx[:vars]['entry_group_name'] %>" + project = "<%= ctx[:test_env_vars]['project_name'] %>" + location = "us-central1" + + labels = { "tag": "test-tf" } + display_name = "terraform entry group" + description = "entry group created by Terraform" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl index bceb611ffd13..4137a4c81dd9 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl @@ -1,7 +1,7 @@ resource "google_cloud_run_v2_service" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "cloud_run_service_name"}}" location = "us-central1" - launch_stage = "BETA" + launch_stage = "GA" template { containers { image = 
"us-docker.pkg.dev/cloudrun/container/hello" diff --git a/mmv1/templates/terraform/examples/interconnect_attachment_dedicated.tf.erb b/mmv1/templates/terraform/examples/interconnect_attachment_dedicated.tf.erb new file mode 100644 index 000000000000..1770904f364e --- /dev/null +++ b/mmv1/templates/terraform/examples/interconnect_attachment_dedicated.tf.erb @@ -0,0 +1,36 @@ +data "google_project" "project" {} + +resource "google_compute_interconnect" "foobar" { + name = "<%= ctx[:vars]['interconnect_name'] %>" + customer_name = "internal_customer" # Special customer only available for Google testing. + interconnect_type = "IT_PRIVATE" # Special type only available for Google testing. + link_type = "LINK_TYPE_ETHERNET_10G_LR" + requested_link_count = 1 + location = "https://www.googleapis.com/compute/v1/projects/${data.google_project.project.name}/global/interconnectLocations/z2z-us-east4-zone1-lciadl-a" # Special location only available for Google testing. +} + +resource "google_compute_interconnect_attachment" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['interconnect_attachment_name'] %>" + type = "DEDICATED" + interconnect = google_compute_interconnect.foobar.id + router = google_compute_router.foobar.id + mtu = 1500 + subnet_length = 29 + vlan_tag8021q = 1000 + region = "https://www.googleapis.com/compute/v1/projects/${data.google_project.project.name}/regions/us-east4" + stack_type = "IPV4_ONLY" +} + +resource "google_compute_router" "foobar" { + name = "<%= ctx[:vars]['router_name'] %>" + network = google_compute_network.foobar.name + region = "us-east4" + bgp { + asn = 16550 + } +} + +resource "google_compute_network" "foobar" { + name = "<%= ctx[:vars]['network_name'] %>" + auto_create_subnetworks = false +} diff --git a/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb b/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb new file mode 100644 index 000000000000..5611cbc65db0 --- /dev/null +++ 
b/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb @@ -0,0 +1,68 @@ +# Create Folder in GCP Organization +resource "google_folder" "autokms_folder" { + provider = google-beta + display_name = "<%= ctx[:vars]["folder_name"] %>" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" +} + +# Create the key project +resource "google_project" "key_project" { + provider = google-beta + project_id = "<%= ctx[:vars]["key_project_name"] %>" + name = "<%= ctx[:vars]["key_project_name"] %>" + folder_id = google_folder.autokms_folder.folder_id + billing_account = "<%= ctx[:test_env_vars]['billing_account'] %>" + depends_on = [google_folder.autokms_folder] +} + +# Enable the Cloud KMS API +resource "google_project_service" "kms_api_service" { + provider = google-beta + service = "cloudkms.googleapis.com" + project = google_project.key_project.project_id + disable_on_destroy = false + disable_dependent_services = true + depends_on = [google_project.key_project] +} + +# Wait delay after enabling APIs +resource "time_sleep" "wait_enable_service_api" { + depends_on = [google_project_service.kms_api_service] + create_duration = "30s" +} + +#Create KMS Service Agent +resource "google_project_service_identity" "kms_service_agent" { + provider = google-beta + service = "cloudkms.googleapis.com" + project = google_project.key_project.number + depends_on = [time_sleep.wait_enable_service_api] +} + +# Wait delay after creating service agent. 
+resource "time_sleep" "wait_service_agent" { + depends_on = [google_project_service_identity.kms_service_agent] + create_duration = "10s" +} + +#Grant the KMS Service Agent the Cloud KMS Admin role +resource "google_project_iam_member" "autokey_project_admin" { + provider = google-beta + project = google_project.key_project.project_id + role = "roles/cloudkms.admin" + member = "serviceAccount:service-${google_project.key_project.number}@gcp-sa-cloudkms.iam.gserviceaccount.com" + depends_on = [time_sleep.wait_service_agent] +} + +# Wait delay after granting IAM permissions +resource "time_sleep" "wait_srv_acc_permissions" { + create_duration = "10s" + depends_on = [google_project_iam_member.autokey_project_admin] +} + +resource "google_kms_autokey_config" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + folder = google_folder.autokms_folder.folder_id + key_project = "projects/${google_project.key_project.project_id}" + depends_on = [time_sleep.wait_srv_acc_permissions] +} diff --git a/mmv1/templates/terraform/examples/kms_key_handle_basic.tf.erb b/mmv1/templates/terraform/examples/kms_key_handle_basic.tf.erb new file mode 100644 index 000000000000..a03be1027829 --- /dev/null +++ b/mmv1/templates/terraform/examples/kms_key_handle_basic.tf.erb @@ -0,0 +1,93 @@ +# Create Folder in GCP Organization +resource "google_folder" "autokms_folder" { + provider = google-beta + display_name = "folder-example" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" +} + +# Create the key project +resource "google_project" "key_project" { + provider = google-beta + project_id = "<%= ctx[:vars]["key_project_name"] %>" + name = "<%= ctx[:vars]["key_project_name"] %>" + folder_id = google_folder.autokms_folder.folder_id + billing_account = "<%= ctx[:test_env_vars]['billing_account'] %>" + depends_on = [google_folder.autokms_folder] +} + +# Create the resource project +resource "google_project" "resource_project" { + provider = google-beta + project_id = "<%= 
ctx[:vars]["resource_project_name"] %>" + name = "<%= ctx[:vars]["resource_project_name"] %>" + folder_id = google_folder.autokms_folder.folder_id + billing_account = "<%= ctx[:test_env_vars]['billing_account'] %>" + depends_on = [google_folder.autokms_folder] +} + +# Enable the Cloud KMS API +resource "google_project_service" "kms_api_service" { + provider = google-beta + service = "cloudkms.googleapis.com" + project = google_project.key_project.project_id + disable_on_destroy = false + disable_dependent_services = true + depends_on = [google_project.key_project] +} + +# Wait delay after enabling APIs +resource "time_sleep" "wait_enable_service_api" { + depends_on = [google_project_service.kms_api_service] + create_duration = "30s" +} + +#Create KMS Service Agent +resource "google_project_service_identity" "kms_service_agent" { + provider = google-beta + service = "cloudkms.googleapis.com" + project = google_project.key_project.number + depends_on = [time_sleep.wait_enable_service_api] +} + +# Wait delay after creating service agent. 
+resource "time_sleep" "wait_service_agent" { + depends_on = [google_project_service_identity.kms_service_agent] + create_duration = "10s" +} + +#Grant the KMS Service Agent the Cloud KMS Admin role +resource "google_project_iam_member" "autokey_project_admin" { + provider = google-beta + project = google_project.key_project.project_id + role = "roles/cloudkms.admin" + member = "serviceAccount:service-${google_project.key_project.number}@gcp-sa-cloudkms.iam.gserviceaccount.com" + depends_on = [time_sleep.wait_service_agent] +} + +# Wait delay after granting IAM permissions +resource "time_sleep" "wait_srv_acc_permissions" { + create_duration = "10s" + depends_on = [google_project_iam_member.autokey_project_admin] +} + +resource "google_kms_autokey_config" "autokey_config" { + provider = google-beta + folder = google_folder.autokms_folder.folder_id + key_project = "projects/${google_project.key_project.project_id}" + depends_on = [time_sleep.wait_srv_acc_permissions] +} + +# Wait delay for autokey config to take effect +resource "time_sleep" "wait_autokey_config" { + create_duration = "10s" + depends_on = [google_kms_autokey_config.autokey_config] +} + +resource "google_kms_key_handle" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + project = google_project.resource_project.project_id + name = "example-key-handle" + location = "global" + resource_type_selector = "storage.googleapis.com/Bucket" + depends_on = [time_sleep.wait_autokey_config] +} diff --git a/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb b/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb new file mode 100644 index 000000000000..2fc0dff39194 --- /dev/null +++ b/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb @@ -0,0 +1,352 @@ +# Internal HTTP load balancer with a managed instance group backend +# [START cloudloadbalancing_int_http_gce] +# VPC network +resource 
"google_compute_network" "ilb_network" { + name = "<%= ctx[:vars]['ilb_network_name'] %>" + auto_create_subnetworks = false +} + +# proxy-only subnet +resource "google_compute_subnetwork" "proxy_subnet" { + name = "<%= ctx[:vars]['proxy_subnet_name'] %>" + ip_cidr_range = "10.0.0.0/24" + region = "us-west1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +# backend subnet +resource "google_compute_subnetwork" "ilb_subnet" { + name = "<%= ctx[:vars]['backend_subnet_name'] %>" + ip_cidr_range = "10.0.1.0/24" + region = "us-west1" + network = google_compute_network.ilb_network.id + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# forwarding rule +resource "google_compute_forwarding_rule" "default" { + name = "<%= ctx[:vars]['forwarding_rule_name'] %>" + region = "us-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL_MANAGED" + port_range = "80" + target = google_compute_region_target_http_proxy.default.id + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + network_tier = "PREMIUM" + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# HTTP target proxy +resource "google_compute_region_target_http_proxy" "default" { + name = "<%= ctx[:vars]['target_http_proxy_name'] %>" + region = "us-west1" + url_map = google_compute_region_url_map.default.id +} + +# URL map +resource "google_compute_region_url_map" "default" { + name = "tf-test-l7-ilb-regional-url-map%{random_suffix}" + region = "us-west1" + default_service = google_compute_region_backend_service.default.id + + host_rule { + hosts = ["service-extensions.com"] + path_matcher = "callouts" + } + + path_matcher { + name = "callouts" + default_service = google_compute_region_backend_service.callouts_backend.id + } +} + +# backend service +resource "google_compute_region_backend_service" "default" { + name = "<%= ctx[:vars]['backend_service_name'] %>" + 
region = "us-west1" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + health_checks = [google_compute_region_health_check.default.id] + + backend { + group = google_compute_region_instance_group_manager.mig.instance_group + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } +} + +# instance template +resource "google_compute_instance_template" "instance_template" { + name = "<%= ctx[:vars]['mig_template_name'] %>" + machine_type = "e2-small" + tags = ["http-server"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + disk { + source_image = "debian-cloud/debian-10" + auto_delete = true + boot = true + } + + # install nginx and serve a simple web page + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + set -euo pipefail + + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y nginx-light jq + + NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname") + IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip") + METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])') + + cat < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "<%= ctx[:vars]['hc_name'] %>" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "<%= ctx[:vars]['mig_name'] %>" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "<%= ctx[:vars]['fw_allow_iap_hc_name'] %>" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "<%= ctx[:vars]['fw_allow_ilb_to_backends_name'] %>" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} +# [END cloudloadbalancing_int_http_gce] + +# [START lb_route_extension] +resource "google_network_services_lb_route_extension" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['lb_route_extension_name'] %>" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = 
google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + forward_headers = ["custom-header"] + } + } + + labels = { + foo = "bar" + } +} + +# test instance +resource "google_compute_instance" "vm_test" { + name = "<%= ctx[:vars]['vm_test_name'] %>" + zone = "us-west1-b" + machine_type = "e2-small" + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + } + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "<%= ctx[:vars]['callouts_instance_name'] %>" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! 
/bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false + + depends_on = [ + google_compute_instance.vm_test + ] +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "<%= ctx[:vars]['callouts_instance_group'] %>" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "<%= ctx[:vars]['callouts_hc_name'] %>" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "<%= ctx[:vars]['callouts_backend_name'] %>" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} +# [END lb_route_extension] diff --git a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb index e946dada186e..e95f12169611 100644 --- a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb +++ 
b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb @@ -17,11 +17,12 @@ resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { filename_prefix = "pre-" filename_suffix = "-%{random_suffix}" - + filename_datetime_format = "YYYY-MM-DD/hh_mm_ssZ" + max_bytes = 1000 max_duration = "300s" } - depends_on = [ + depends_on = [ google_storage_bucket.<%= ctx[:primary_resource_id] %>, google_storage_bucket_iam_member.admin, ] @@ -34,4 +35,4 @@ resource "google_storage_bucket_iam_member" "admin" { bucket = google_storage_bucket.<%= ctx[:primary_resource_id] %>.name role = "roles/storage.admin" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb index 8df2217e7b5a..b26352b1483e 100644 --- a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb +++ b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb @@ -17,15 +17,16 @@ resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { filename_prefix = "pre-" filename_suffix = "-%{random_suffix}" - + filename_datetime_format = "YYYY-MM-DD/hh_mm_ssZ" + max_bytes = 1000 max_duration = "300s" - + avro_config { write_metadata = true } } - depends_on = [ + depends_on = [ google_storage_bucket.<%= ctx[:primary_resource_id] %>, google_storage_bucket_iam_member.admin, ] @@ -38,4 +39,4 @@ resource "google_storage_bucket_iam_member" "admin" { bucket = google_storage_bucket.<%= ctx[:primary_resource_id] %>.name role = "roles/storage.admin" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/expand_property_method.go.tmpl 
b/mmv1/templates/terraform/expand_property_method.go.tmpl index d09d8ffea58d..014d175cd81b 100644 --- a/mmv1/templates/terraform/expand_property_method.go.tmpl +++ b/mmv1/templates/terraform/expand_property_method.go.tmpl @@ -14,7 +14,7 @@ limitations under the License. */ -}} {{- define "expandPropertyMethod" }} {{- if $.CustomExpand }} - {{- $.CustomTemplate $.CustomExpand -}} + {{ $.CustomTemplate $.CustomExpand -}} {{- else }}{{/* if $.CustomExpand */}} {{- if $.IsA "Map" }} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { diff --git a/mmv1/templates/terraform/iam_policy.go.tmpl b/mmv1/templates/terraform/iam_policy.go.tmpl index cb7f2456d039..97d6046ef36e 100644 --- a/mmv1/templates/terraform/iam_policy.go.tmpl +++ b/mmv1/templates/terraform/iam_policy.go.tmpl @@ -101,7 +101,7 @@ func {{ $.ResourceName }}IamUpdaterProducer(d tpgresource.TerraformResourceData, {{- end }}{{- /* range $param := $.IamResourceParams */}} // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := tpgresource.GetImportIdQualifiers([]string{"{{ $.ImportIdFormatsFromIam }}"}, d, config, d.Get("{{ $.IamParentResourceName }}").(string)) + m, err := tpgresource.GetImportIdQualifiers([]string{"{{ $.ImportIdRegexesFromIam }}"}, d, config, d.Get("{{ $.IamParentResourceName }}").(string)) if err != nil { return nil, err } @@ -154,7 +154,7 @@ func {{ $.ResourceName }}IdParseFunc(d *schema.ResourceData, config *transport_t {{- end }} {{- end }}{{/* range $param := $.IamResourceParams */}} - m, err := tpgresource.GetImportIdQualifiers([]string{"{{ $.ImportIdFormatsFromIam }}"}, d, config, d.Id()) + m, err := tpgresource.GetImportIdQualifiers([]string{"{{ $.ImportIdRegexesFromIam }}"}, d, config, d.Id()) if err != nil { return err } diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl 
b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index 2dd41871984a..e791ca08df37 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -26,12 +26,12 @@ {{- if not (or $.ItemType.DefaultValue (eq $.ItemType.DefaultValue "")) }} Default value is [`{{ $.ItemType.DefaultValue }}`]. {{- end }} - Each value may be one of: {{ $.ItemType.EnumValuesToString "`" }}. + Each value may be one of: {{ $.ItemType.EnumValuesToString "`" false }}. {{- else if and ($.IsA "Enum") (and (not $.Output) (not (and $.ItemType $.ItemType.SkipDocsValues)))}} {{- if not (or $.DefaultValue (eq $.DefaultValue "")) }} Default value is [`{{ $.DefaultValue }}`]. {{- end }} - Possible values are: {{ $.EnumValuesToString "`" }}. + Possible values are: {{ $.EnumValuesToString "`" false }}. {{- end }} {{- if $.Sensitive }} **Note**: This property is sensitive and will not be displayed in the plan. diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 1ad90ef037a4..b919597efb00 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -742,9 +742,9 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating {{ $.Name }} %q: %#v", d.Id(), obj) headers := make(http.Header) -{{- if $.UpdateMask -}} -//TODO updatemask -{{- end -}} +{{ if $.UpdateMask -}} +{{template "UpdateMask" $ -}} +{{end}} {{- if $.CustomCode.PreUpdate -}} {{- $.CustomTemplate $.CustomCode.PreUpdate -}} {{end}} @@ -1182,19 +1182,20 @@ func resource{{ $.ResourceName -}}Encoder(d *schema.ResourceData, meta interface } {{- end }} {{- if $.CustomCode.UpdateEncoder }} + func resource{{ $.ResourceName -}}UpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { {{- $.CustomTemplate $.CustomCode.UpdateEncoder -}} } 
{{- end }} {{- if $.CustomCode.Decoder }} func resource{{ $.ResourceName -}}Decoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{- $.CustomTemplate $.CustomCode.Decoder -}} + {{ $.CustomTemplate $.CustomCode.Decoder -}} } {{- end }} {{- if $.CustomCode.PostCreateFailure }} func resource{{ $.ResourceName -}}PostCreateFailure(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{- $.CustomTemplate $.CustomCode.PostCreateFailure -}} + {{ $.CustomTemplate $.CustomCode.PostCreateFailure -}} } {{- end }} {{/* TODO nested query */}} -{{/* TODO state upgraders */}} \ No newline at end of file +{{/* TODO state upgraders */}} diff --git a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl new file mode 100644 index 000000000000..2f887a483c85 --- /dev/null +++ b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl @@ -0,0 +1,313 @@ +{{/* The license inside this block applies to this file + Copyright 2024 Google LLC. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -}} +{{- /* NOTE NOTE NOTE + The newlines in this file are *load bearing*. This file outputs + Markdown, which is extremely sensitive to newlines. You have got + to have a newline after every attribute and property, because + otherwise MD will think the next element is part of the previous + property's bullet point. 
You cannot have any double newlines in the + middle of a property or attribute, because MD will think that the + empty line ends the bullet point and the indentation will be off. + You must have a newline before and after all --- document indicators, + and you must have a newline before and after all - - - hlines. + You cannot have more than one blank line between properties. + The --- document indicator must be the first line of the file. + As long as you only use `build_property_documentation`, it all works + fine - but when you need to add custom docs (notes, etc), you need + to remember these things. + + Know also that the `lines` function in heavy use in MagicModules will + strip exactly one trailing newline - unless that's what you've designed + your docstring for, it's easier to insert newlines where you need them + manually. That's why, in this file, we use `lines` on anything which + is generated from a ruby function, but skip it on anything that is + directly inserted from YAML. */ -}} +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "{{$.ProductMetadata.DisplayName}}" +description: |- + Collection of resources to manage IAM policy for {{$.ProductMetadata.DisplayName}} {{$.Name}} +--- + +# IAM policy for {{$.ProductMetadata.DisplayName}} {{$.Name}} +Three different resources help you manage your IAM policy for {{$.ProductMetadata.DisplayName}} {{$.Name}}. Each of these resources serves a different use case: + +* `{{ $.IamTerraformName }}_policy`: Authoritative. 
Sets the IAM policy for the {{ lower $.Name }} and replaces any existing policy already attached. +* `{{ $.IamTerraformName }}_binding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the {{ lower $.Name }} are preserved. +* `{{ $.IamTerraformName }}_member`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the {{ lower $.Name }} are preserved. + +A data source can be used to retrieve policy data in advent you do not need creation + +* `{{ $.IamTerraformName }}_policy`: Retrieves the IAM policy for the {{ lower $.Name }} + +~> **Note:** `{{ $.IamTerraformName }}_policy` **cannot** be used in conjunction with `{{ $.IamTerraformName }}_binding` and `{{ $.IamTerraformName }}_member` or they will fight over what your policy should be. + +~> **Note:** `{{ $.IamTerraformName }}_binding` resources **can be** used in conjunction with `{{ $.IamTerraformName }}_member` resources **only if** they do not grant privilege to the same role. + +{{ if $.IamPolicy.IamConditionsRequestType }} +~> **Note:** This resource supports IAM Conditions but they have some known limitations which can be found [here](https://cloud.google.com/iam/docs/conditions-overview#limitations). Please review this article if you are having issues with IAM Conditions. +{{- end }} + +{{ if or (eq $.MinVersionObj.Name "beta") (eq $.IamPolicy.MinVersion "beta") }} +~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. +See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
+{{- end }} + +## {{ $.IamTerraformName }}_policy + +```hcl +data "google_iam_policy" "admin" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} + binding { + role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" + members = [ + "user:jane@example.com", + ] + } +} + +resource "{{ $.IamTerraformName }}_policy" "policy" { +{{ if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{/* object.iam_policy.example_config_body)) */}} + policy_data = data.google_iam_policy.admin.policy_data +} +``` + +{{ if $.IamPolicy.IamConditionsRequestType }} +With IAM Conditions: + +```hcl +data "google_iam_policy" "admin" { +{{ if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} + binding { + role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" + members = [ + "user:jane@example.com", + ] + + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } + } +} + +resource "{{ $.IamTerraformName }}_policy" "policy" { +{{ if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{/* object.iam_policy.example_config_body)) */}} + policy_data = data.google_iam_policy.admin.policy_data +} +``` +{{- end }} +## {{ $.IamTerraformName }}_binding + +```hcl +resource "{{ $.IamTerraformName }}_binding" "binding" { +{{ if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{/* object.iam_policy.example_config_body)) */}} + role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" + members = [ + "user:jane@example.com", + ] +} +``` + +{{ if $.IamPolicy.IamConditionsRequestType }} +With IAM Conditions: + +```hcl +resource "{{ $.IamTerraformName }}_binding" "binding" { +{{ if eq $.MinVersionObj.Name 
"beta" }} + provider = google-beta +{{- end }} +{{/* object.iam_policy.example_config_body)) */}} + role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" + members = [ + "user:jane@example.com", + ] + + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +``` +{{- end }} +## {{ $.IamTerraformName }}_member + +```hcl +resource "{{ $.IamTerraformName }}_member" "member" { +{{ if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{/* object.iam_policy.example_config_body)) */}} + role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" + member = "user:jane@example.com" +} +``` + +{{ if $.IamPolicy.IamConditionsRequestType -}} +With IAM Conditions: + +```hcl +resource "{{ $.IamTerraformName }}_member" "member" { +{{ if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{/* object.iam_policy.example_config_body)) */}} + role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole}}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" + member = "user:jane@example.com" + + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +``` +{{- end }} + +## Argument Reference + +The following arguments are supported: + +{{ range $param := $.IamSelfLinkProperties }} +{{- if eq $param.Name "name" }} +* `{{if $.IamPolicy.ParentResourceAttribute}}{{$.IamPolicy.ParentResourceAttribute}}{{else}}{{underscore $.Name}}{{end}}` - (Required) Used to find the parent resource to bind the IAM policy to +{{ else if or (eq (underscore $param.Name) "region") (eq (underscore $param.Name) "zone") }} +* `{{ underscore $param.Name }}` - (Optional) {{ $param.Description }} Used to find the parent 
resource to bind the IAM policy to. If not specified, + the value will be parsed from the identifier of the parent resource. If no {{ underscore $param.Name }} is provided in the parent identifier and no + {{ underscore $param.Name }} is specified, it is taken from the provider configuration. +{{- else }} +* `{{ underscore $param.Name }}` - (Required) {{ $param.Description }} Used to find the parent resource to bind the IAM policy to +{{- end }} +{{- end }} +{{ if $.IamPolicy.BaseUrl }} +{{ if contains $.IamPolicy.BaseUrl "{{project}}" }} +{{/* The following new line allow for project to be bullet-formatted properly. */}} + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. +{{- end }} +{{ else if contains $.BaseUrl "{{project}}" }} +{{/* The following new line allow for project to be bullet-formatted properly. */}} + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. +{{ end }} + +* `member/members` - (Required) Identities that will be granted the privilege in `role`. + Each entry can have one of the following values: + * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. + * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. + * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. + * **serviceAccount:{emailid}**: An email address that represents a service account. 
For example, my-other-app@appspot.gserviceaccount.com. + * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. + * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. + * **projectOwner:projectid**: Owners of the given project. For example, "projectOwner:my-example-project" + * **projectEditor:projectid**: Editors of the given project. For example, "projectEditor:my-example-project" + * **projectViewer:projectid**: Viewers of the given project. For example, "projectViewer:my-example-project" + +* `role` - (Required) The role that should be applied. Only one + `{{ $.IamTerraformName }}_binding` can be used per role. Note that custom roles must be of the format + `[projects|organizations]/{parent-name}/roles/{role-name}`. + +* `policy_data` - (Required only by `{{ $.IamTerraformName }}_policy`) The policy data generated by + a `google_iam_policy` data source. + +{{ if $.IamPolicy.IamConditionsRequestType }} +* `condition` - (Optional) An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding. + Structure is documented below. + +--- + +The `condition` block supports: + +* `expression` - (Required) Textual representation of an expression in Common Expression Language syntax. + +* `title` - (Required) A title for the expression, i.e. a short string describing its purpose. + +* `description` - (Optional) An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + +~> **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the + identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will + consider it to be an entirely different resource and will treat it as such. 
+{{- end }} +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are +exported: + +* `etag` - (Computed) The etag of the IAM policy. + +## Import + +For all import syntaxes, the "resource in question" can take any of the following forms: + +{{ range $idFormat := $.ImportIdFormatsFromIam }} +* {{ $idFormat }} +{{- end }} + +Any variables not passed in the import command will be taken from the provider configuration. + +{{$.ProductMetadata.DisplayName}} {{lower $.Name}} IAM resources can be imported using the resource identifiers, role, and member. + +IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. +``` +$ terraform import {{ $.IamTerraformName }}_member.editor "{{ $.FirstIamImportIdFormat }} {{$.IamPolicy.AllowedIamRole}} user:jane@example.com" +``` + +IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. +``` +$ terraform import {{ $.IamTerraformName }}_binding.editor "{{ $.FirstIamImportIdFormat }} {{$.IamPolicy.AllowedIamRole}}" +``` + +IAM policy imports use the identifier of the resource in question, e.g. +``` +$ terraform import {{ $.IamTerraformName }}_policy.editor {{ $.FirstIamImportIdFormat }} +``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + +{{- if contains $.BaseUrl "{{project}}" }} +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). 
+{{- end }} diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index ea1268ac4a5a..8d4dc2537ac1 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -51,7 +51,7 @@ {{ end -}} {{ end -}} {{ if and (eq .Type "Enum") (not .Output) -}} - ValidateFunc: verify.ValidateEnum([]string{ {{- .EnumValuesToString "\"" -}} }), + ValidateFunc: verify.ValidateEnum([]string{ {{- .EnumValuesToString "\"" true -}} }), {{ end -}} {{ if .DiffSuppressFunc -}} DiffSuppressFunc: {{ .DiffSuppressFunc }}, @@ -61,18 +61,18 @@ {{ if .StateFunc -}} StateFunc: {{ .StateFunc }}, {{ end -}} - Description: `{{ replace .Description "`" "'" -1 -}} -{{ if and (eq .Type "Array") (eq .ItemType.Type "Enum") (not .Output) (not .ItemType.SkipDocsValues) -}} -{{ if .ItemType.DefaultValue -}} + Description: `{{ replace .GetDescription "`" "'" -1 -}} +{{- if and (eq .Type "Array") (eq .ItemType.Type "Enum") (not .Output) (not .ItemType.SkipDocsValues) -}} + {{- if .ItemType.DefaultValue -}} Default value: {{ .ItemType.DefaultValue -}} -{{ end -}} -Possible values: [{{- .EnumValuesToString "\"" -}}] -{{ else if and (eq .Type "Enum") (not .Output) -}} -{{ if .DefaultValue -}} -Default value: {{.DefaultValue -}} -{{ end -}} -{{" "}}Possible values: [{{- .EnumValuesToString "\"" -}}] -{{- end -}}`, + {{- end -}} +Possible values: [{{- .EnumValuesToString "\"" false -}}] + {{- else if and (eq .Type "Enum") (not .Output) -}} + {{- if .DefaultValue -}} + {{- " "}}Default value: "{{ .DefaultValue -}}" + {{- end -}} + {{- " "}}Possible values: [{{- .EnumValuesToString "\"" false -}}] + {{- end -}}`, {{ if eq .Type "NestedObject" -}} {{ if not .Output -}} MaxItems: 1, @@ -110,7 +110,7 @@ Default value: {{.DefaultValue -}} {{ else if eq .ItemType.Type "Enum" -}} Elem: &schema.Schema{ Type: schema.Type{{ .ItemTypeClass -}}, - ValidateFunc: verify.ValidateEnum([]string{ {{- 
.ItemType.EnumValuesToString "\"" -}} }), + ValidateFunc: verify.ValidateEnum([]string{ {{- .ItemType.EnumValuesToString "\"" true -}} }), }, {{ else -}} Elem: &schema.Schema{ diff --git a/mmv1/templates/terraform/update_mask.go.tmpl b/mmv1/templates/terraform/update_mask.go.tmpl new file mode 100644 index 000000000000..98a47f4f2194 --- /dev/null +++ b/mmv1/templates/terraform/update_mask.go.tmpl @@ -0,0 +1,26 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +{{- define "UpdateMask" }} +updateMask := []string{} +{{- range $field, $masks := $.GetPropertyUpdateMasksGroups }} +if d.HasChange("{{ $field }}") { + updateMask = append(updateMask, {{ join $masks ","}}) +} +{{ end }} +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} +{{- end }}{{/* define */}} diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index c1118eccf69d..64015eeb959c 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -36,6 +36,8 @@ import ( func ResourceConverters() map[string][]cai.ResourceConverter { return map[string][]cai.ResourceConverter{ "google_artifact_registry_repository": {artifactregistry.ResourceConverterArtifactRegistryRepository()}, + "google_alloydb_cluster": {alloydb.ResourceConverterAlloydbCluster()}, + "google_alloydb_instance": {alloydb.ResourceConverterAlloydbInstance()}, "google_compute_address": {compute.ResourceConverterComputeAddress()}, "google_compute_autoscaler": {compute.ResourceConverterComputeAutoscaler()}, "google_compute_firewall": {compute.ResourceConverterComputeFirewall()}, @@ -43,7 +45,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_compute_forwarding_rule": {compute.ResourceConverterComputeForwardingRule()}, "google_compute_global_address": {compute.ResourceConverterComputeGlobalAddress()}, "google_compute_global_forwarding_rule": {compute.ResourceConverterComputeGlobalForwardingRule()}, - "google_compute_health_check": {compute.ResourceConverterComputeHealthCheck()}, + "google_compute_health_check": {compute.ResourceConverterComputeHealthCheck()}, "google_compute_instance": {compute.ResourceConverterComputeInstance()}, "google_compute_network": 
{compute.ResourceConverterComputeNetwork()}, "google_compute_node_template": {compute.ResourceConverterComputeNodeTemplate()}, @@ -74,6 +76,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_bigquery_dataset_iam_member": {bigquery.ResourceConverterBigqueryDatasetIamMember()}, "google_bigquery_table": {resourceConverterBigQueryTable()}, "google_datastream_connection_profile": {datastream.ResourceConverterDatastreamConnectionProfile()}, + "google_datastream_private_connection": {datastream.ResourceConverterDatastreamPrivateConnection()}, "google_datastream_stream": {datastream.ResourceConverterDatastreamStream()}, "google_org_policy_policy": {resourceConverterOrgPolicyPolicy()}, "google_redis_instance": {redis.ResourceConverterRedisInstance()}, diff --git a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb index 505fb9856e71..847bfaa6d224 100644 --- a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb +++ b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb @@ -514,7 +514,7 @@ func ResourceBigQueryTable() *schema.Resource { "source_format": { Type: schema.TypeString, Optional: true, - Description: ` Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, + Description: `Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, ValidateFunc: validation.StringInSlice([]string{ "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", }, false), @@ -620,7 +620,7 @@ func ResourceBigQueryTable() *schema.Resource { Type: schema.TypeList, Optional: true, MaxItems: 1, - Description: `Additional properties to set if sourceFormat is set to JSON."`, + Description: `Additional properties to set if sourceFormat is set to JSON.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "encoding": { @@ -641,11 +641,105 @@ func ResourceBigQueryTable() *schema.Resource { Description: `Load option to be used together with sourceFormat newline-delimited JSON to indicate that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and sourceFormat must be set to NEWLINE_DELIMITED_JSON).`, }, + "bigtable_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional options if sourceFormat is set to BIGTABLE.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column_family": { + Type: schema.TypeList, + Optional: true, + Description: `A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeList, + Optional: true, + Description: `A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. 
All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "qualifier_encoded": { + Type: schema.TypeString, + Optional: true, + Description: `Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.`, + }, + "qualifier_string": { + Type: schema.TypeString, + Optional: true, + Description: `Qualifier string.`, + }, + "field_name": { + Type: schema.TypeString, + Optional: true, + Description: `If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.`, + }, + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. 
BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.`, + }, + "only_read_latest": { + Type: schema.TypeBool, + Optional: true, + Description: `If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.`, + }, + }, + }, + }, + "family_id": { + Type: schema.TypeString, + Optional: true, + Description: `Identifier of the column family.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.`, + }, + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.`, + }, + "only_read_latest": { + Type: schema.TypeBool, + Optional: true, + Description: `If this is set only the latest version of value are exposed for all columns in this column family. 
This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.`, + }, + }, + }, + }, + "ignore_unspecified_column_families": { + Type: schema.TypeBool, + Optional: true, + Description: `If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.`, + }, + "read_rowkey_as_string": { + Type: schema.TypeBool, + Optional: true, + Description: `If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.`, + }, + "output_column_families_as_json": { + Type: schema.TypeBool, + Optional: true, + Description: `If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.`, + }, + }, + }, + }, + "parquet_options": { Type: schema.TypeList, Optional: true, MaxItems: 1, - Description: `Additional properties to set if sourceFormat is set to PARQUET."`, + Description: `Additional properties to set if sourceFormat is set to PARQUET.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enum_as_string": { @@ -674,7 +768,7 @@ func ResourceBigQueryTable() *schema.Resource { "range": { Type: schema.TypeString, Optional: true, - Description: `Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"`, + Description: `Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. 
Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20`, AtLeastOneOf: []string{ "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", "external_data_configuration.0.google_sheets_options.0.range", @@ -1913,6 +2007,9 @@ func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataCon if v, ok := raw["json_options"]; ok { edc.JsonOptions = expandJsonOptions(v) } + if v, ok := raw["bigtable_options"]; ok { + edc.BigtableOptions = expandBigtableOptions(v) + } if v, ok := raw["google_sheets_options"]; ok { edc.GoogleSheetsOptions = expandGoogleSheetsOptions(v) } @@ -2002,6 +2099,10 @@ func flattenExternalDataConfiguration(edc *bigquery.ExternalDataConfiguration) ( result["json_options"] = flattenJsonOptions(edc.JsonOptions) } + if edc.BigtableOptions != nil { + result["bigtable_options"] = flattenBigtableOptions(edc.BigtableOptions) + } + if edc.IgnoreUnknownValues { result["ignore_unknown_values"] = edc.IgnoreUnknownValues } @@ -2232,6 +2333,164 @@ func flattenParquetOptions(opts *bigquery.ParquetOptions) []map[string]interface return []map[string]interface{}{result} } +func expandBigtableOptions(configured interface{}) *bigquery.BigtableOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.BigtableOptions{} + + crs := []*bigquery.BigtableColumnFamily{} + if v, ok := raw["column_family"]; ok { + for _, columnFamily := range v.([]interface{}) { + crs = append(crs, expandBigtableColumnFamily(columnFamily)) + } + + if len(crs) > 0 { + opts.ColumnFamilies = crs + } + } + + if v, ok := raw["ignore_unspecified_column_families"]; ok { + opts.IgnoreUnspecifiedColumnFamilies = v.(bool) + } + + if v, ok := raw["read_rowkey_as_string"]; ok { + opts.ReadRowkeyAsString = v.(bool) + } + + if v, ok := raw["output_column_families_as_json"]; ok { + opts.OutputColumnFamiliesAsJson = v.(bool) + } + + return 
opts +} + +func flattenBigtableOptions(opts *bigquery.BigtableOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.ColumnFamilies != nil { + result["column_family"] = flattenBigtableColumnFamily(opts.ColumnFamilies) + } + + if opts.IgnoreUnspecifiedColumnFamilies { + result["ignore_unspecified_column_families"] = opts.IgnoreUnspecifiedColumnFamilies + } + + if opts.ReadRowkeyAsString { + result["read_rowkey_as_string"] = opts.ReadRowkeyAsString + } + + if opts.OutputColumnFamiliesAsJson { + result["output_column_families_as_json"] = opts.OutputColumnFamiliesAsJson + } + + return []map[string]interface{}{result} +} + +func expandBigtableColumnFamily(configured interface{}) *bigquery.BigtableColumnFamily { + raw := configured.(map[string]interface{}) + + opts := &bigquery.BigtableColumnFamily{} + + crs := []*bigquery.BigtableColumn{} + if v, ok := raw["column"]; ok { + for _, column := range v.([]interface{}) { + crs = append(crs, expandBigtableColumn(column)) + } + + if len(crs) > 0 { + opts.Columns = crs + } + } + + if v, ok := raw["family_id"]; ok { + opts.FamilyId = v.(string) + } + + if v, ok := raw["type"]; ok { + opts.Type = v.(string) + } + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + if v, ok := raw["only_read_latest"]; ok { + opts.OnlyReadLatest = v.(bool) + } + + return opts +} + +func flattenBigtableColumnFamily(edc []*bigquery.BigtableColumnFamily) []map[string]interface{} { + results := []map[string]interface{}{} + + for _, fr := range edc { + result := map[string]interface{}{} + if fr.Columns != nil { + result["column"] = flattenBigtableColumn(fr.Columns) + } + result["family_id"] = fr.FamilyId + result["type"] = fr.Type + result["encoding"] = fr.Encoding + result["only_read_latest"] = fr.OnlyReadLatest + results = append(results, result) + } + + return results +} + +func expandBigtableColumn(configured interface{}) *bigquery.BigtableColumn { + raw := configured.(map[string]interface{}) + + 
opts := &bigquery.BigtableColumn{} + + if v, ok := raw["qualifier_encoded"]; ok { + opts.QualifierEncoded = v.(string) + } + + if v, ok := raw["qualifier_string"]; ok { + opts.QualifierString = v.(string) + } + + if v, ok := raw["field_name"]; ok { + opts.FieldName = v.(string) + } + + if v, ok := raw["type"]; ok { + opts.Type = v.(string) + } + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + if v, ok := raw["only_read_latest"]; ok { + opts.OnlyReadLatest = v.(bool) + } + + return opts +} + +func flattenBigtableColumn(edc []*bigquery.BigtableColumn) []map[string]interface{} { + results := []map[string]interface{}{} + + for _, fr := range edc { + result := map[string]interface{}{} + result["qualifier_encoded"] = fr.QualifierEncoded + result["qualifier_string"] = fr.QualifierString + result["field_name"] = fr.FieldName + result["type"] = fr.Type + result["encoding"] = fr.Encoding + result["only_read_latest"] = fr.OnlyReadLatest + results = append(results, result) + } + + return results +} + func expandJsonOptions(configured interface{}) *bigquery.JsonOptions { if len(configured.([]interface{})) == 0 { return nil diff --git a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go.erb b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go.erb index 1eebf95566f5..f96dfc40debd 100644 --- a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go.erb +++ b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go.erb @@ -1118,6 +1118,37 @@ func TestAccBigQueryDataTable_bigtable(t *testing.T) { }) } +func TestAccBigQueryDataTable_bigtable_options(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 8), + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromBigtableOptions(context), + }, + { + ResourceName: "google_bigquery_table.table", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableFromBigtable(context), + }, + }, + }) +} + func TestAccBigQueryDataTable_sheet(t *testing.T) { t.Parallel() @@ -3287,6 +3318,77 @@ resource "google_bigquery_dataset" "dataset" { `, context) } +func testAccBigQueryTableFromBigtableOptions(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_bigtable_instance" "instance" { + name = "tf-test-bigtable-inst-%{random_suffix}" + cluster { + cluster_id = "tf-test-bigtable-%{random_suffix}" + zone = "us-central1-b" + } + instance_type = "DEVELOPMENT" + deletion_protection = false +} +resource "google_bigtable_table" "table" { + name = "%{random_suffix}" + instance_name = google_bigtable_instance.instance.name + column_family { + family = "cf-%{random_suffix}-first" + } + column_family { + family = "cf-%{random_suffix}-second" + } +} +resource "google_bigquery_table" "table" { + deletion_protection = false + dataset_id = google_bigquery_dataset.dataset.dataset_id + table_id = "tf_test_bigtable_%{random_suffix}" + external_data_configuration { + autodetect = true + source_format = "BIGTABLE" + ignore_unknown_values = true + source_uris = [ + "https://googleapis.com/bigtable/${google_bigtable_table.table.id}", + ] + bigtable_options { + column_family { + family_id = "cf-%{random_suffix}-first" + column { + field_name = "cf-%{random_suffix}-first" + type = "STRING" + encoding = "TEXT" + only_read_latest = true + } + type = "STRING" + encoding = "TEXT" + only_read_latest = true + } + column_family { + family_id = 
"cf-%{random_suffix}-second" + type = "STRING" + encoding = "TEXT" + only_read_latest = false + } + ignore_unspecified_column_families = true + read_rowkey_as_string = true + output_column_families_as_json = true + } + } +} +resource "google_bigquery_dataset" "dataset" { + dataset_id = "tf_test_ds_%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "EU" + delete_contents_on_destroy = true + default_table_expiration_ms = 3600000 + labels = { + env = "default" + } +} +`, context) +} + func testAccBigQueryTableFromSheet(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_bigquery_table" "table" { diff --git a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb index b4f3b6ad5a9a..b4379dc5a552 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb @@ -733,7 +733,7 @@ func testAccCloudRunV2Service_cloudRunServiceWithDirectVPC(context map[string]in resource "google_cloud_run_v2_service" "default" { name = "%{service_name}" location = "us-central1" - launch_stage = "BETA" + launch_stage = "GA" template { containers { image = "us-docker.pkg.dev/cloudrun/container/hello" @@ -753,7 +753,7 @@ func testAccCloudRunV2Service_cloudRunServiceWithDirectVPCUpdate(context map[str resource "google_cloud_run_v2_service" "default" { name = "%{service_name}" location = "us-central1" - launch_stage = "BETA" + launch_stage = "GA" template { containers { image = "us-docker.pkg.dev/cloudrun/container/hello" diff --git a/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret.go.erb b/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret.go.erb index a6e7564d1f97..7275024598a5 100644 --- 
a/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret.go.erb +++ b/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret.go.erb @@ -32,9 +32,9 @@ func ResourceComposerUserWorkloadsSecret() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(120 * time.Minute), - Update: schema.DefaultTimeout(120 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Create: schema.DefaultTimeout(time.Minute), + Update: schema.DefaultTimeout(time.Minute), + Delete: schema.DefaultTimeout(time.Minute), }, CustomizeDiff: customdiff.All( @@ -48,7 +48,7 @@ func ResourceComposerUserWorkloadsSecret() *schema.Resource { Required: true, ForceNew: true, ValidateFunc: verify.ValidateGCEName, - Description: `Name of the environment.`, + Description: `Name of the secret.`, }, "region": { Type: schema.TypeString, @@ -206,7 +206,7 @@ func resourceComposerUserWorkloadsSecretDelete(d *schema.ResourceData, meta inte func resourceComposerUserWorkloadsSecretImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)/userWorkloadsSecrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)/userWorkloadsSecrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { return nil, err } @@ -235,7 +235,7 @@ func resourceComposerUserWorkloadsSecretImport(d *schema.ResourceData, meta inte return []*schema.ResourceData{d}, nil } -func resourceComposerUserWorkloadsSecretName(d *schema.ResourceData, config *transport_tpg.Config) (*UserWorkloadsSecretsName, error) { +func resourceComposerUserWorkloadsSecretName(d *schema.ResourceData, 
config *transport_tpg.Config) (*UserWorkloadsSecretName, error) { project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err @@ -246,7 +246,7 @@ func resourceComposerUserWorkloadsSecretName(d *schema.ResourceData, config *tra return nil, err } - return &UserWorkloadsSecretsName{ + return &UserWorkloadsSecretName{ Project: project, Region: region, Environment: d.Get("environment").(string), @@ -254,18 +254,18 @@ func resourceComposerUserWorkloadsSecretName(d *schema.ResourceData, config *tra }, nil } -type UserWorkloadsSecretsName struct { +type UserWorkloadsSecretName struct { Project string Region string Environment string Secret string } -func (n *UserWorkloadsSecretsName) ResourceName() string { +func (n *UserWorkloadsSecretName) ResourceName() string { return fmt.Sprintf("projects/%s/locations/%s/environments/%s/userWorkloadsSecrets/%s", n.Project, n.Region, n.Environment, n.Secret) } -func (n *UserWorkloadsSecretsName) ParentName() string { +func (n *UserWorkloadsSecretName) ParentName() string { return fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) } diff --git a/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret_test.go.erb b/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret_test.go.erb index 4ae95faece9c..61d851512140 100644 --- a/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret_test.go.erb +++ b/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_secret_test.go.erb @@ -27,7 +27,7 @@ func TestAccComposerUserWorkloadsSecret_basic(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - // CheckDestroy: testAccComposerUserWorkloadsSecretDestroy(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), Steps: []resource.TestStep{ { Config: 
testAccComposerUserWorkloadsSecret_basic(envName, secretName, envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv()), @@ -53,6 +53,7 @@ func TestAccComposerUserWorkloadsSecret_update(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccComposerUserWorkloadsSecret_basic(envName, secretName, envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv()), @@ -78,6 +79,7 @@ func TestAccComposerUserWorkloadsSecret_delete(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccComposerUserWorkloadsSecret_basic(envName, secretName, envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv()), @@ -162,9 +164,9 @@ func testAccComposerUserWorkloadsSecretDestroyed(t *testing.T) func(s *terraform if len(idTokens) != 8 { return fmt.Errorf("Invalid ID %q, expected format projects/{project}/regions/{region}/environments/{environment}/userWorkloadsSecrets/{name}", rs.Primary.ID) } - secretName := &composer.UserWorkloadsSecretsName{ + secretName := &composer.UserWorkloadsSecretName{ Project: idTokens[1], - Region: idTokens[3], + Region: idTokens[3], Environment: idTokens[5], Secret: idTokens[7], } diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index 84a8f1e793b1..c4c3f624529a 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -137,9 +137,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, 
error) { <% unless version == 'ga' -%> if v, ok := original["max_run_duration"]; ok { transformedMaxRunDuration, err := expandComputeMaxRunDuration(v) - if scheduling.InstanceTerminationAction == "STOP" && transformedMaxRunDuration != nil { - return nil, fmt.Errorf("Can not set MaxRunDuration on instance with STOP InstanceTerminationAction, it is not supported by terraform.") - } if err != nil { return nil, err } @@ -149,6 +146,15 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { if v, ok := original["maintenance_interval"]; ok { scheduling.MaintenanceInterval = v.(string) } + + if v, ok := original["on_instance_stop_action"]; ok { + transformedOnInstanceStopAction, err := expandComputeOnInstanceStopAction(v) + if err != nil { + return nil, err + } + scheduling.OnInstanceStopAction = transformedOnInstanceStopAction + scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnInstanceStopAction") + } <% end -%> if v, ok := original["local_ssd_recovery_timeout"]; ok { transformedLocalSsdRecoveryTimeout, err := expandComputeLocalSsdRecoveryTimeout(v) @@ -195,6 +201,24 @@ func expandComputeMaxRunDurationNanos(v interface{}) (interface{}, error) { func expandComputeMaxRunDurationSeconds(v interface{}) (interface{}, error) { return v, nil } + +func expandComputeOnInstanceStopAction(v interface{}) (*compute.SchedulingOnInstanceStopAction, error){ + l := v.([]interface{}) + onInstanceStopAction := compute.SchedulingOnInstanceStopAction{} + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + if d, ok := original["discard_local_ssd"]; ok { + onInstanceStopAction.DiscardLocalSsd = d.(bool) + } else { + return nil, nil + } + + return &onInstanceStopAction, nil +} <% end -%> func expandComputeLocalSsdRecoveryTimeout(v interface{}) (*compute.Duration, error) { @@ -250,6 +274,9 @@ func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { if resp.MaintenanceInterval != "" 
{ schedulingMap["maintenance_interval"] = resp.MaintenanceInterval } + if resp.OnInstanceStopAction != nil { + schedulingMap["on_instance_stop_action"] = flattenOnInstanceStopAction(resp.OnInstanceStopAction) + } <% end -%> if resp.LocalSsdRecoveryTimeout != nil { @@ -279,6 +306,15 @@ func flattenComputeMaxRunDuration(v *compute.Duration) []interface{} { transformed["seconds"] = v.Seconds return []interface{}{transformed} } + +func flattenOnInstanceStopAction(v *compute.SchedulingOnInstanceStopAction) []interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["discard_local_ssd"] = v.DiscardLocalSsd + return []interface{}{transformed} +} <% end -%> func flattenComputeLocalSsdRecoveryTimeout(v *compute.Duration) []interface{} { @@ -656,7 +692,11 @@ func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { oScheduling := o.([]interface{})[0].(map[string]interface{}) newScheduling := n.([]interface{})[0].(map[string]interface{}) +<% unless version == 'ga' -%> + return hasNodeAffinitiesChanged(oScheduling, newScheduling) || hasMaxRunDurationChanged(oScheduling, newScheduling) +<% else -%> return hasNodeAffinitiesChanged(oScheduling, newScheduling) +<% end -%> } // Terraform doesn't correctly calculate changes on schema.Set, so we do it manually @@ -702,6 +742,32 @@ func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { return false } +<% unless version == 'ga' -%> +func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { + oMrd := oScheduling["max_run_duration"].([]interface{}) + nMrd := nScheduling["max_run_duration"].([]interface{}) + + if (len(oMrd) == 0 || oMrd[0] == nil) && (len(nMrd) == 0 || nMrd[0] == nil) { + return false + } + if (len(oMrd) == 0 || oMrd[0] == nil) || (len(nMrd) == 0 || nMrd[0] == nil) { + return true + } + + oldMrd := oMrd[0].(map[string]interface{}) + newMrd := nMrd[0].(map[string]interface{}) + + if oldMrd["seconds"] != newMrd["seconds"] 
{ + return true + } + if oldMrd["nanos"] != newMrd["nanos"] { + return true + } + + return false +} +<% end -%> + func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) bool { oldNAs := oScheduling["node_affinities"].(*schema.Set).List() newNAs := newScheduling["node_affinities"].(*schema.Set).List() diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 37d5fa66fd6d..7be414a69952 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -62,6 +62,7 @@ var ( <% unless version == 'ga' -%> "scheduling.0.max_run_duration", "scheduling.0.maintenance_interval", + "scheduling.0.on_instance_stop_action", <% end -%> "scheduling.0.local_ssd_recovery_timeout", } @@ -818,6 +819,24 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, + "on_instance_stop_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: `Defines the behaviour for instances with the instance_termination_action.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "discard_local_ssd": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the contents of any attached Local SSD disks will be discarded.`, + Default: false, + ForceNew: true, + }, + }, + }, + }, "maintenance_interval": { Type: schema.TypeString, Optional: true, @@ -1633,16 +1652,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("scratch_disk", scratchDisks); err != nil { return fmt.Errorf("Error setting scratch_disk: %s", err) } -<% unless version == 'ga' -%> - // Add extra check on Scheduling to prevent STOP instance setting MaxRunDuration. - // When Instance being stopped, GCE will wipe out the MaxRunDuration field. 
- // And Terraform has no visiblity on this field after then. Given the infrastructure - // constraint, MaxRunDuration will only be supported with instance has - // DELETE InstanceTerminationAction - if instance.Scheduling.MaxRunDuration != nil && instance.Scheduling.InstanceTerminationAction == "STOP" { - return fmt.Errorf("Can not set MaxRunDuration on instance with STOP InstanceTerminationAction, it is not supported by terraform.") - } -<% end -%> if err := d.Set("scheduling", flattenScheduling(instance.Scheduling)); err != nil { return fmt.Errorf("Error setting scheduling: %s", err) } @@ -2339,7 +2348,11 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err desiredStatus := d.Get("desired_status").(string) if statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED" && !d.Get("allow_stopping_for_update").(bool) { +<% unless version == 'ga' -%> + return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities, scheduling.max_run_duration " + +<% else -%> return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities " + +<% end -%> "or network_interface.[#d].(network/subnetwork/subnetwork_project) or advanced_machine_features on a started instance requires stopping it. " + "To acknowledge this, please set allow_stopping_for_update = true in your config. 
" + "You can also stop it by setting desired_status = \"TERMINATED\", but the instance will not be restarted after the update.") diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb index fff6ff61c46a..bf62de039797 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb @@ -46,6 +46,41 @@ func TestAccComputeInstanceFromMachineImage_basic(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccComputeInstanceFromMachineImage_maxRunDuration(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceFromMachineImage_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImage_maxRunDuration(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + }, + }) +} +<% end -%> + func TestAccComputeInstanceFromMachineImage_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -230,6 +265,69 @@ resource "google_compute_instance_from_machine_image" "foobar" { `, instance, instance, newInstance) } +<% unless version == 'ga' -%> +func testAccComputeInstanceFromMachineImage_maxRunDuration(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + } + +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + } +} +`, instance, instance, newInstance) +} +<% end -%> + + func testAccComputeInstanceFromMachineImage_localSsdRecoveryTimeout(instance, newInstance string) string { return fmt.Sprintf(` resource "google_compute_instance" "vm" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb index 
e5358ed72163..1d5bbd89500c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb @@ -73,6 +73,39 @@ func TestAccComputeInstanceFromTemplate_self_link_unique(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction + expectedMaxRunDuration.Nanos = 456 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + }, + }) +} +<% end -%> + func TestAccComputeInstanceFromTemplate_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -446,6 +479,108 @@ resource "google_compute_instance_from_template" "foobar" { `, template, template, instance) } +<% unless version == 'ga' -%> +func testAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" 
{ + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 456 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + } +} +`, template, template, instance) +} +<% end -%> + func testAccComputeInstanceFromTemplate_localSsdRecoveryTimeout(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git 
a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index 2fb03f7c5894..9e652212aead 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -38,6 +38,7 @@ var ( <% unless version == 'ga' -%> "scheduling.0.max_run_duration", "scheduling.0.maintenance_interval", + "scheduling.0.on_instance_stop_action", <% end -%> "scheduling.0.local_ssd_recovery_timeout", } @@ -732,6 +733,24 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, + "on_instance_stop_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: `Defines the behaviour for instances with the instance_termination_action.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "discard_local_ssd": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the contents of any attached Local SSD disks will be discarded.`, + Default: false, + ForceNew: true, + }, + }, + }, + }, "maintenance_interval" : { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index 4c7a840cafb4..0e5a6615d473 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -1075,6 +1075,113 @@ func TestAccComputeInstanceTemplate_spot(t *testing.T) { } <% unless version == 'ga' -%> +func TestAccComputeInstanceTemplate_spot_maxRunDuration_deleteTerminationAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // 
Define in testAccComputeInstanceTemplate_spot_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10), instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_spot_maxRunDuration_stopTerminationAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceTemplate_spot_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10), instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 600 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + 
testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + func TestAccComputeInstanceTemplate_spot_maxRunDuration(t *testing.T) { t.Parallel() @@ -1083,6 +1190,7 @@ func TestAccComputeInstanceTemplate_spot_maxRunDuration(t *testing.T) { // Define in testAccComputeInstanceTemplate_spot expectedMaxRunDuration.Nanos = 123 expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1090,14 +1198,14 @@ func TestAccComputeInstanceTemplate_spot_maxRunDuration(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10)), + Config: testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10), instanceTerminationAction), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists( t, "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), - testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, "DELETE"), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), ), }, @@ -3603,7 +3711,7 @@ resource "google_compute_instance_template" "foobar" { `, suffix) } -func testAccComputeInstanceTemplate_spot_maxRunDuration(suffix string) string { +func testAccComputeInstanceTemplate_spot_maxRunDuration(suffix string, 
instanceTerminationAction string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { family = "debian-11" @@ -3630,7 +3738,7 @@ resource "google_compute_instance_template" "foobar" { preemptible = true automatic_restart = false provisioning_model = "SPOT" - instance_termination_action = "DELETE" + instance_termination_action = "%s" <% unless version == 'ga' -%> max_run_duration { nanos = 123 @@ -3648,8 +3756,58 @@ resource "google_compute_instance_template" "foobar" { scopes = ["userinfo-email", "compute-ro", "storage-ro"] } } +`, suffix, instanceTerminationAction) +} + +<% unless version == 'ga' -%> +func testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 600 + } + on_instance_stop_action { + discard_local_ssd = true + } + + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} `, suffix) } +<% end -%> func testAccComputeInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 9f5442dc0497..ef606ea0654c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ 
b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -1291,6 +1291,7 @@ func TestAccComputeInstance_soleTenantNodeAffinities(t *testing.T) { }) } + func TestAccComputeInstance_reservationAffinities(t *testing.T) { t.Parallel() @@ -2581,7 +2582,100 @@ func TestAccComputeInstance_spotVM_update(t *testing.T) { } <% unless version == 'ga' -%> -func TestAccComputeInstance_spotVM_maxRunDration(t *testing.T) { +func TestAccComputeInstance_maxRunDuration_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-mrd-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_standardVM_maxRunDurationUpdated + expectedMaxRunDuration.Nanos = 456 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_standardVM_maxRunDuration(instanceName, "STOP"), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_standardVM_maxRunDurationUpdated(instanceName, "STOP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, "STOP"), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_standardVM_maxRunDuration_stopTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var 
expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_standardVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_standardVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_localSsdVM_maxRunDuration_stopTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_localSsdVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 180 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_localSsdVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + 
testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_spotVM_maxRunDuration_deleteTerminationAction(t *testing.T) { t.Parallel() var instance compute.Instance @@ -2590,6 +2684,7 @@ func TestAccComputeInstance_spotVM_maxRunDration(t *testing.T) { // Define in testAccComputeInstance_spotVM_maxRunDuration expectedMaxRunDuration.Nanos = 123 expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -2597,11 +2692,41 @@ func TestAccComputeInstance_spotVM_maxRunDration(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccComputeInstance_spotVM_maxRunDuration(instanceName), + Config: testAccComputeInstance_spotVM_maxRunDuration(instanceName, instanceTerminationAction), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTerminationAction(&instance, "DELETE"), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_standardVM_maxRunDuration_deleteTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_standardVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_standardVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), ), }, @@ -2633,7 +2758,7 @@ func TestAccComputeInstance_spotVM_maxRunDuration_update(t *testing.T) { }, computeInstanceImportStep("us-central1-a", instanceName, []string{}), { - Config: testAccComputeInstance_spotVM_maxRunDuration(instanceName), + Config: testAccComputeInstance_spotVM_maxRunDuration(instanceName, "DELETE"), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), @@ -7953,8 +8078,121 @@ resource "google_compute_instance" "foobar" { `, instance) } +<% unless version == 'ga' -%> +func testAccComputeInstance_standardVM_maxRunDuration(instance string, instanceTerminationAction string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} -func testAccComputeInstance_spotVM_maxRunDuration(instance string) string { +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + provisioning_model = "STANDARD" + automatic_restart = false + instance_termination_action = "%s" + max_run_duration { + nanos = 123 + seconds = 60 + } + } +} +`, instance, instanceTerminationAction) +} + +func testAccComputeInstance_standardVM_maxRunDurationUpdated(instance 
string, instanceTerminationAction string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + provisioning_model = "STANDARD" + automatic_restart = false + instance_termination_action = "%s" + max_run_duration { + nanos = 456 + seconds = 60 + } + } +} +`, instance, instanceTerminationAction) +} + +func testAccComputeInstance_localSsdVM_maxRunDuration(instance string, instanceTerminationAction string) string { + return fmt.Sprintf(` + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n2-standard-8" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + # Local SSD interface type; NVME for image with optimized NVMe drivers or SCSI + # Local SSD are 375 GiB in size + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + access_config {} + } + + scheduling { + provisioning_model = "STANDARD" + automatic_restart = false + instance_termination_action = "%s" + max_run_duration { + nanos = 123 + seconds = 180 + } + on_instance_stop_action { + discard_local_ssd = true + } + } +} +`, instance, instanceTerminationAction) +} +<% end -%> + + +func testAccComputeInstance_spotVM_maxRunDuration(instance string, instanceTerminationAction string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { family = "ubuntu-2004-lts" @@ -7980,7 +8218,7 @@ resource "google_compute_instance" "foobar" { provisioning_model = "SPOT" automatic_restart = false preemptible = true - instance_termination_action = "DELETE" + instance_termination_action = "%s" <% unless version == 'ga' -%> max_run_duration 
{ nanos = 123 @@ -7989,7 +8227,7 @@ resource "google_compute_instance" "foobar" { <% end -%> } } -`, instance) +`, instance, instanceTerminationAction) } func testAccComputeInstance_localSsdRecoveryTimeout(instance string) string { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index 4eec6dc1d1f5..ca64142f0710 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -692,6 +692,24 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, + "on_instance_stop_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: `Defines the behaviour for instances with the instance_termination_action.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "discard_local_ssd": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the contents of any attached Local SSD disks will be discarded.`, + Default: false, + ForceNew: true, + }, + }, + }, + }, "maintenance_interval" : { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index 097bd53259b3..519616b48b55 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -1033,7 +1033,7 @@ func TestAccComputeRegionInstanceTemplate_spot_maxRunDuration(t *testing.T) { var instanceTemplate compute.InstanceTemplate var expectedMaxRunDuration = compute.Duration{} - // Define in testAccComputeRegionInstanceTemplate_spot + // 
Define in testAccComputeRegionInstanceTemplate_spot_maxRunDuration expectedMaxRunDuration.Nanos = 123 expectedMaxRunDuration.Seconds = 60 @@ -1062,6 +1062,39 @@ func TestAccComputeRegionInstanceTemplate_spot_maxRunDuration(t *testing.T) { }, }) } + +func TestAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeRegionInstanceTemplate_spot + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeRegionInstanceTemplateInstanceTerminationAction(&instanceTemplate, "STOP"), + testAccCheckComputeRegionInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} <% end -%> func TestAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { @@ -3317,6 +3350,57 @@ resource "google_compute_region_instance_template" "foobar" { `, suffix) } +<% unless version == 'ga' -%> +func testAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = 
"debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} +<% end -%> + func testAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_ssl_policy_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_ssl_policy_test.go.erb index b39fc345172b..aa160ba3b1ad 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_ssl_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_ssl_policy_test.go.erb @@ -16,6 +16,36 @@ import ( <% end -%> ) +func TestAccComputeRegionSslPolicy_regionInherit(t *testing.T) { + t.Parallel() + + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSslRegionDefaultFromEnv(sslPolicyName), + }, + { + ResourceName: "google_compute_region_ssl_policy.foobar", + ImportState: true, + ImportStateVerify: 
true, + }, + { + Config: testAccComputeRegionSslUpdateRegionDefaultFromEnv(sslPolicyName), + }, + { + ResourceName: "google_compute_region_ssl_policy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccComputeRegionSslPolicy_update(t *testing.T) { t.Parallel() @@ -229,3 +259,25 @@ resource "google_compute_region_ssl_policy" "update" { } `, resourceName) } + +func testAccComputeRegionSslRegionDefaultFromEnv(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "foobar" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_0" + profile = "MODERN" +} +`, resourceName) +} + +func testAccComputeRegionSslUpdateRegionDefaultFromEnv(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "foobar" { + name = "%s" + description = "Generated by TF provider acceptance test - updated" + min_tls_version = "TLS_1_0" + profile = "MODERN" +} +`, resourceName) +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_group_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_group_test.go new file mode 100644 index 000000000000..ba26d84d2a01 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_group_test.go @@ -0,0 +1,69 @@ +package dataplex_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccDataplexEntryGroup_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexEntryGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexEntryGroup_full(context), + }, + { + ResourceName: "google_dataplex_entry_group.test_entry_group_full", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "entry_group_id", "labels", "terraform_labels"}, + }, + { + Config: testAccDataplexEntryGroup_update(context), + }, + { + ResourceName: "google_dataplex_entry_group.test_entry_group_basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "entry_group_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexEntryGroup_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_entry_group" "test_entry_group_full" { + entry_group_id = "tf-test-entry-group-full%{random_suffix}" + project = "%{project_name}" + location = "us-central1" + + labels = { "tag": "test-tf" } + display_name = "terraform entry group" + description = "entry group created by Terraform" +} +`, context) +} + +func testAccDataplexEntryGroup_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_entry_group" "test_entry_group_basic" { + entry_group_id = "tf-test-entry-group-basic%{random_suffix}" + project = "%{project_name}" + location = "us-central1" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go new file mode 100644 index 000000000000..615125529460 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go @@ -0,0 +1,868 @@ +package networkservices_test + +import ( + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetworkServicesLbRouteExtension_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesLbRouteExtensionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesLbRouteExtension_basic(context), + }, + { + ResourceName: "google_network_services_lb_route_extension.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesLbRouteExtension_update(context), + }, + { + ResourceName: "google_network_services_lb_route_extension.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesLbRouteExtension_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +# Internal HTTP load balancer with a managed instance group backend +# VPC network +resource "google_compute_network" "ilb_network" { + name = "tf-test-l7-ilb-network%{random_suffix}" + auto_create_subnetworks = false +} + +# proxy-only subnet +resource "google_compute_subnetwork" "proxy_subnet" { + name = "tf-test-l7-ilb-proxy-subnet%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-west1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +# backend subnet +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l7-ilb-subnet%{random_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-west1" + network = 
google_compute_network.ilb_network.id + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# forwarding rule +resource "google_compute_forwarding_rule" "default" { + name = "tf-test-l7-ilb-forwarding-rule%{random_suffix}" + region = "us-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL_MANAGED" + port_range = "80" + target = google_compute_region_target_http_proxy.default.id + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + network_tier = "PREMIUM" + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# HTTP target proxy +resource "google_compute_region_target_http_proxy" "default" { + name = "tf-test-l7-ilb-target-http-proxy%{random_suffix}" + region = "us-west1" + url_map = google_compute_region_url_map.default.id +} + +# URL map +resource "google_compute_region_url_map" "default" { + name = "tf-test-l7-ilb-regional-url-map%{random_suffix}" + region = "us-west1" + default_service = google_compute_region_backend_service.default.id + + host_rule { + hosts = ["service-extensions.com"] + path_matcher = "callouts" + } + + path_matcher { + name = "callouts" + default_service = google_compute_region_backend_service.callouts_backend.id + } +} + +# backend service +resource "google_compute_region_backend_service" "default" { + name = "tf-test-l7-ilb-backend-subnet%{random_suffix}" + region = "us-west1" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + health_checks = [google_compute_region_health_check.default.id] + + backend { + group = google_compute_region_instance_group_manager.mig.instance_group + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } +} + +# instance template +resource "google_compute_instance_template" "instance_template" { + name = "tf-test-l7-ilb-mig-template%{random_suffix}" + machine_type = "e2-small" + tags = ["http-server"] + + network_interface { + network = google_compute_network.ilb_network.id + 
subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + disk { + source_image = "debian-cloud/debian-10" + auto_delete = true + boot = true + } + + # install nginx and serve a simple web page + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + set -euo pipefail + + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y nginx-light jq + + NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname") + IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip") + METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])') + + cat < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "tf-test-l7-ilb-hc%{random_suffix}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "tf-test-l7-ilb-mig1%{random_suffix}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "tf-test-l7-ilb-fw-allow-iap-hc%{random_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "tf-test-l7-ilb-fw-allow-ilb-to-backends%{random_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} + +resource "google_network_services_lb_route_extension" "default" { + name = "tf-test-l7-ilb-route-ext%{random_suffix}" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + forward_headers 
= ["custom-header"] + } + } + + labels = { + foo = "bar" + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "tf-test-l7-ilb-callouts-ins%{random_suffix}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "tf-test-l7-ilb-callouts-ins-group%{random_suffix}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "tf-test-l7-ilb-callouts-hc%{random_suffix}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" 
"callouts_backend" { + name = "tf-test-l7-ilb-callouts-backend%{random_suffix}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} +`, context) +} + +func testAccNetworkServicesLbRouteExtension_update(context map[string]interface{}) string { + return acctest.Nprintf(` +# Internal HTTP load balancer with a managed instance group backend +# VPC network +resource "google_compute_network" "ilb_network" { + name = "tf-test-l7-ilb-network%{random_suffix}" + auto_create_subnetworks = false +} + +# proxy-only subnet +resource "google_compute_subnetwork" "proxy_subnet" { + name = "tf-test-l7-ilb-proxy-subnet%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-west1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +# backend subnet +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l7-ilb-subnet%{random_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-west1" + network = google_compute_network.ilb_network.id + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# forwarding rule +resource "google_compute_forwarding_rule" "default" { + name = "tf-test-l7-ilb-forwarding-rule%{random_suffix}" + region = "us-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL_MANAGED" + port_range = "80" + target = google_compute_region_target_http_proxy.default.id + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + network_tier = "PREMIUM" + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# Additional forwarding rule +resource 
"google_compute_forwarding_rule" "additional_forwarding_rule" { + name = "tf-test-l7-ilb-additional-forwarding-rule%{random_suffix}" + region = "us-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL_MANAGED" + port_range = "80" + target = google_compute_region_target_http_proxy.default.id + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + network_tier = "PREMIUM" + + depends_on = [ + google_compute_subnetwork.proxy_subnet, + google_compute_forwarding_rule.default + ] +} + +# HTTP target proxy +resource "google_compute_region_target_http_proxy" "default" { + name = "tf-test-l7-ilb-target-http-proxy%{random_suffix}" + region = "us-west1" + url_map = google_compute_region_url_map.default.id +} + +# URL map +resource "google_compute_region_url_map" "default" { + name = "tf-test-l7-ilb-regional-url-map%{random_suffix}" + region = "us-west1" + default_service = google_compute_region_backend_service.default.id + + host_rule { + hosts = ["service-extensions.com"] + path_matcher = "callouts" + } + + path_matcher { + name = "callouts" + default_service = google_compute_region_backend_service.callouts_backend.id + } + + host_rule { + hosts = ["service-extensions-2.com"] + path_matcher = "callouts2" + } + + path_matcher { + name = "callouts2" + default_service = google_compute_region_backend_service.callouts_backend_2.id + } +} + +# backend service +resource "google_compute_region_backend_service" "default" { + name = "tf-test-l7-ilb-backend-subnet%{random_suffix}" + region = "us-west1" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + health_checks = [google_compute_region_health_check.default.id] + + backend { + group = google_compute_region_instance_group_manager.mig.instance_group + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } +} + +# instance template +resource "google_compute_instance_template" "instance_template" { + name = 
"tf-test-l7-ilb-mig-template%{random_suffix}" + machine_type = "e2-small" + tags = ["http-server"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + disk { + source_image = "debian-cloud/debian-10" + auto_delete = true + boot = true + } + + # install nginx and serve a simple web page + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + set -euo pipefail + + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y nginx-light jq + + NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname") + IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip") + METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])') + + cat < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "tf-test-l7-ilb-hc%{random_suffix}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "tf-test-l7-ilb-mig1%{random_suffix}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "tf-test-l7-ilb-fw-allow-iap-hc%{random_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "tf-test-l7-ilb-fw-allow-ilb-to-backends%{random_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} + +resource "google_network_services_lb_route_extension" "default" { + name = "tf-test-l7-ilb-route-ext%{random_suffix}" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [ + google_compute_forwarding_rule.default.self_link, + google_compute_forwarding_rule.additional_forwarding_rule.self_link + ] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext12" + authority = "ext12.com" + service = 
google_compute_region_backend_service.callouts_backend_2.self_link + timeout = "0.2s" + fail_open = false + + forward_headers = ["custom-header"] + } + } + + extension_chains { + name = "chain2" + + match_condition { + cel_expression = "request.path.startsWith('/extensions2')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + forward_headers = ["custom-header"] + } + } + + labels = { + bar = "foo" + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "tf-test-l7-ilb-callouts-ins%{random_suffix}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! 
/bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "tf-test-l7-ilb-callouts-ins-group%{random_suffix}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "tf-test-l7-ilb-callouts-hc%{random_suffix}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "tf-test-l7-ilb-callouts-backend%{random_suffix}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} + +# route extension backend instance 2 +resource "google_compute_instance" "callouts_instance_2" { + name = "tf-test-l7-ilb-callouts-ins-2%{random_suffix}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = 
google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false + + depends_on = [ + google_compute_instance.callouts_instance + ] +} + +// callouts instance group 2 +resource "google_compute_instance_group" "callouts_instance_group_2" { + name = "tf-test-l7-ilb-callouts-ins-group-2%{random_suffix}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance_2.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } + + depends_on = [ + google_compute_instance_group.callouts_instance_group + ] +} + +# callout health check 2 +resource "google_compute_region_health_check" "callouts_health_check_2" { + name = "tf-test-l7-ilb-callouts-hc-2%{random_suffix}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.callouts_health_check + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend_2" { + name = "tf-test-l7-ilb-callouts-backend-2%{random_suffix}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check_2.id] + + backend { + 
group = google_compute_instance_group.callouts_instance_group_2.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.callouts_backend + ] +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go index 25529a550cc2..4ae710112c1c 100644 --- a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go +++ b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go @@ -206,6 +206,40 @@ func TestAccPubsubSubscriptionBigQuery_update(t *testing.T) { }) } +func TestAccPubsubSubscriptionCloudStorage_update(t *testing.T) { + t.Parallel() + + bucket := fmt.Sprintf("tf-test-bucket-%s", acctest.RandString(t, 10)) + topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + subscriptionShort := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, ""), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s"), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + // Context: hashicorp/terraform-provider-google#4993 // This test makes a call to GET an subscription before it is actually created. 
// The PubSub API negative-caches responses so this tests we are @@ -459,6 +493,66 @@ resource "google_pubsub_subscription" "foo" { `, dataset, table, topic, subscription, useTableSchema) } +func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, filenamePrefix, filenameSuffix, filenameDatetimeFormat string, maxBytes int, maxDuration string) string { + filenamePrefixString := "" + if filenamePrefix != "" { + filenamePrefixString = fmt.Sprintf(`filename_prefix = "%s"`, filenamePrefix) + } + filenameSuffixString := "" + if filenameSuffix != "" { + filenameSuffixString = fmt.Sprintf(`filename_suffix = "%s"`, filenameSuffix) + } + filenameDatetimeString := "" + if filenameDatetimeFormat != "" { + filenameDatetimeString = fmt.Sprintf(`filename_datetime_format = "%s"`, filenameDatetimeFormat) + } + maxBytesString := "" + if maxBytes != 0 { + maxBytesString = fmt.Sprintf(`max_bytes = %d`, maxBytes) + } + maxDurationString := "" + if maxDuration != "" { + maxDurationString = fmt.Sprintf(`max_duration = "%s"`, maxDuration) + } + return fmt.Sprintf(` +data "google_project" "project" { } + +resource "google_storage_bucket_iam_member" "admin" { + bucket = google_storage_bucket.test.name + role = "roles/storage.admin" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" +} + +resource "google_storage_bucket" "test" { + name = "%s" + location = "US" +} + +resource "google_pubsub_topic" "foo" { + name = "%s" +} + +resource "google_pubsub_subscription" "foo" { + name = "%s" + topic = google_pubsub_topic.foo.id + + cloud_storage_config { + bucket = "${google_storage_bucket.test.name}" + %s + %s + %s + %s + %s + } + + depends_on = [ + google_storage_bucket.test, + google_storage_bucket_iam_member.admin, + ] +} +`, bucket, topic, subscription, filenamePrefixString, filenameSuffixString, filenameDatetimeString, maxBytesString, maxDurationString) +} + func testAccPubsubSubscription_topicOnly(topic string) 
string { return fmt.Sprintf(` resource "google_pubsub_topic" "foo" { diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go index 8308b4ae7e28..e1026f891282 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go @@ -2383,7 +2383,7 @@ resource "google_sql_database_instance" "instance" { database_version = "%s" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = true require_ssl = %t @@ -2401,7 +2401,7 @@ resource "google_sql_database_instance" "mysql_pvp_instance_name" { database_version = "MYSQL_8_0" root_password = "abcABC123!" settings { - tier = "db-f1-micro" + tier = "db-g1-small" password_validation_policy { min_length = 6 complexity = "COMPLEXITY_DEFAULT" @@ -2421,7 +2421,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" } } ` @@ -2433,7 +2433,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" } } ` @@ -2445,7 +2445,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" } } ` @@ -2962,7 +2962,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = true binary_log_enabled = true @@ -2989,7 +2989,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = 
"db-g1-small" ip_configuration { psc_config { psc_enabled = true @@ -3022,7 +3022,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { psc_config { psc_enabled = true @@ -3055,7 +3055,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { psc_config { psc_enabled = true @@ -3089,7 +3089,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { psc_config { psc_enabled = true @@ -3160,7 +3160,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = data.google_compute_network.servicenet.self_link @@ -3183,7 +3183,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = data.google_compute_network.servicenet.self_link @@ -3206,7 +3206,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = data.google_compute_network.servicenet.self_link @@ -3224,7 +3224,7 @@ resource "google_sql_database_instance" "replica1" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = data.google_compute_network.servicenet.self_link @@ 
-3258,7 +3258,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = data.google_compute_network.servicenet.self_link @@ -3298,7 +3298,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = data.google_compute_network.servicenet.self_link @@ -3323,7 +3323,7 @@ resource "google_sql_database_instance" "clone1" { } settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = false } @@ -3339,7 +3339,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" location_preference { zone = "us-central1-f" } @@ -3370,7 +3370,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" availability_type = "REGIONAL" location_preference { zone = "us-central1-f" @@ -3405,7 +3405,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = %s settings { - tier = "db-f1-micro" + tier = "db-g1-small" location_preference { zone = "us-central1-f" } @@ -3437,7 +3437,7 @@ resource "google_sql_database_instance" "instance" { deletion_protection = false maintenance_version = "MYSQL_5_7_37.R20210508.01_03" settings { - tier = "db-f1-micro" + tier = "db-g1-small" } } ` @@ -3450,7 +3450,7 @@ resource "google_sql_database_instance" "instance" { deletion_protection = false settings { deletion_protection_enabled = %s - tier = "db-f1-micro" + tier = "db-g1-small" } } ` @@ -3466,7 +3466,7 @@ resource "google_sql_database_instance" 
"instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "false" private_network = google_compute_network.servicenet.self_link @@ -3553,7 +3553,7 @@ resource "google_sql_database_instance" "instance_master" { deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = true @@ -3571,7 +3571,7 @@ resource "google_sql_database_instance" "instance_slave" { master_instance_name = google_sql_database_instance.instance_master.name settings { - tier = "db-f1-micro" + tier = "db-g1-small" } } ` @@ -3584,7 +3584,7 @@ resource "google_sql_database_instance" "instance" { deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" availability_type = "REGIONAL" @@ -3604,7 +3604,7 @@ resource "google_sql_database_instance" "instance" { deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" disk_autoresize = true disk_autoresize_limit = 50 disk_size = 15 @@ -3621,7 +3621,7 @@ resource "google_sql_database_instance" "instance" { deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" maintenance_window { day = 7 @@ -3639,7 +3639,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { authorized_networks { @@ -3659,7 +3659,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" ip_configuration { ipv4_enabled = "true" @@ -3675,7 +3675,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" } } @@ -3699,7 +3699,7 @@ resource "google_sql_database_instance" "instance" { 
database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" user_labels = { track = "production" location = "western-division" @@ -3714,7 +3714,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" user_labels = { track = "production" } @@ -3730,7 +3730,7 @@ resource "google_sql_database_instance" "instance" { deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" insights_config { query_insights_enabled = true @@ -3923,7 +3923,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_8_0" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = true start_time = "00:00" @@ -3946,7 +3946,7 @@ resource "google_sql_database_instance" "instance" { region = "us-central1" settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = "false" } @@ -3965,7 +3965,7 @@ resource "google_sql_database_instance" "instance" { region = "us-central1" settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = "false" } @@ -4026,7 +4026,7 @@ resource "google_sql_database_instance" "instance" { region = "us-central1" settings { - tier = "db-f1-micro" + tier = "db-g1-small" backup_configuration { enabled = false } @@ -4159,7 +4159,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" database_flags { name = "character_set_server" @@ -4181,7 +4181,7 @@ resource "google_sql_database_instance" "instance" { database_version = "MYSQL_5_7" deletion_protection = false settings { - tier = "db-f1-micro" + tier = "db-g1-small" database_flags { name = "auto_increment_increment" @@ -4207,7 +4207,7 @@ resource 
"google_sql_database_instance" "master" { disk_autoresize = true disk_size = 10 disk_type = "PD_SSD" - tier = "db-f1-micro" + tier = "db-g1-small" activation_policy = "ALWAYS" pricing_plan = "PER_USE" @@ -4238,7 +4238,7 @@ resource "google_sql_database_instance" "replica" { failover_target = false } settings { - tier = "db-f1-micro" + tier = "db-g1-small" availability_type = "ZONAL" pricing_plan = "PER_USE" disk_autoresize = true @@ -4278,7 +4278,7 @@ resource "google_sql_database_instance" "master" { disk_autoresize = true disk_size = 10 disk_type = "PD_SSD" - tier = "db-f1-micro" + tier = "db-g1-small" activation_policy = "ALWAYS" pricing_plan = "PER_USE" @@ -4309,7 +4309,7 @@ resource "google_sql_database_instance" "replica" { failover_target = false } settings { - tier = "db-f1-micro" + tier = "db-g1-small" availability_type = "ZONAL" pricing_plan = "PER_USE" disk_autoresize = true @@ -4359,7 +4359,7 @@ resource "google_sql_database_instance" "instance" { database_version = "%s" deletion_protection = %t settings { - tier = "db-f1-micro" + tier = "db-g1-small" activation_policy = "%s" } } diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index e5cb82ebbe90..3fcf25a12c99 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -331,6 +331,14 @@ func ResourceStorageBucket() *schema.Resource { return true } } + if new == "0" && old == "1" { + n := d.Get(strings.TrimSuffix(k, ".#")) + l = n.([]interface{}) + contents := l[0].(map[string]interface{}) + if contents["enabled"] == false { + return true + } + } return false }, }, @@ -754,6 +762,11 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("autoclass") { if v, ok := d.GetOk("autoclass"); ok { sb.Autoclass = expandBucketAutoclass(v) + } else 
{ + sb.Autoclass = &storage.BucketAutoclass{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + } } } diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb index e41e5bb47f0a..5ebb36dc255f 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb @@ -109,6 +109,73 @@ func TestAccStorageBucket_basicWithAutoclass(t *testing.T) { }) } +func TestAccStorageBucket_AutoclassDiffSupress(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basicWithAutoclass(bucketName,false), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basicWithAutoclass(bucketName,true), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccStorageBucket_requesterPays(t *testing.T) { t.Parallel() diff --git a/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb b/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb index 0a955d9f11b3..d593658c30b5 100644 --- a/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb +++ b/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb @@ -1062,10 +1062,9 @@ func flattenAwsS3Data(awsS3Data *storagetransfer.AwsS3Data, d *schema.ResourceDa "path": awsS3Data.Path, "role_arn": awsS3Data.RoleArn, } - if awsS3Data.AwsAccessKey != nil { + if _, exist := d.GetOkExists("transfer_spec.0.aws_s3_data_source.0.aws_access_key"); exist{ data["aws_access_key"] = flattenAwsAccessKeys(d) } - return []map[string]interface{}{data} } diff --git a/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown b/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown index dc0c5df595c4..8f455d59a552 100644 --- a/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown @@ -167,6 +167,17 @@ in Terraform state, a `terraform destroy` or `terraform apply` that would delete * `table_constraints` - (Optional) Defines the primary key and foreign keys. Structure is [documented below](#nested_table_constraints). 
+* `resource_tags` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + This field is in beta. The tags attached to this table. Tag keys are + globally unique. Tag key is expected to be in the namespaced format, for + example "123456789012/environment" where 123456789012 is the ID of the + parent organization or project resource for this tag key. Tag value is + expected to be the short name, for example "Production". + +* `allow_resource_tags_on_deletion` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + This field is in beta. Whether or not to allow table deletion when there are + still resource tags attached. The default value is false. + The `external_data_configuration` block supports: * `autodetect` - (Required) - Let BigQuery try to autodetect the schema @@ -187,6 +198,9 @@ in Terraform state, a `terraform destroy` or `terraform apply` that would delete * `csv_options` (Optional) - Additional properties to set if `source_format` is set to "CSV". Structure is [documented below](#nested_csv_options). +* `bigtable_options` (Optional) - Additional properties to set if + `source_format` is set to "BIGTABLE". Structure is [documented below](#nested_bigtable_options). + * `json_options` (Optional) - Additional properties to set if `source_format` is set to "JSON". Structure is [documented below](#nested_json_options). @@ -276,6 +290,30 @@ in Terraform state, a `terraform destroy` or `terraform apply` that would delete * `skip_leading_rows` (Optional) - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. +The `bigtable_options` block supports: + +* `column_family` (Optional) - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. 
You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is [documented below](#nested_column_family). +* `ignore_unspecified_column_families` (Optional) - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false. +* `read_rowkey_as_string` (Optional) - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false. +* `output_column_families_as_json` (Optional) - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false. + +The `column_family` block supports: + +* `column` (Optional) - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is [documented below](#nested_column). +* `family_id` (Optional) - Identifier of the column family. +* `type` (Optional) - The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it. 
+* `encoding` (Optional) - The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. +* `only_read_latest` (Optional) - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column. + +The `column` block supports: + +* `qualifier_encoded` (Optional) - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName. +* `qualifier_string` (Optional) - Qualifier string. +* `field_name` (Optional) - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries. +* `type` (Optional) - The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels. 
+* `encoding` (Optional) - The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels. +* `only_read_latest` (Optional) - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels. + The `json_options` block supports: * `encoding` (Optional) - The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. diff --git a/mmv1/third_party/terraform/website/docs/r/composer_user_workloads_secret.html.markdown b/mmv1/third_party/terraform/website/docs/r/composer_user_workloads_secret.html.markdown index bf1dd46340d4..797798867148 100644 --- a/mmv1/third_party/terraform/website/docs/r/composer_user_workloads_secret.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/composer_user_workloads_secret.html.markdown @@ -83,7 +83,7 @@ Secret can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsSecrets/{{name}}` * `{{project}}/{{region}}/{{environment}}/{{name}}` -* `{{name}}` +* `{{environment}}/{{name}}` In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import User Workloads Secret using one of the formats above. 
For example: @@ -99,5 +99,5 @@ When using the [`terraform import` command](https://developer.hashicorp.com/terr ``` $ terraform import google_composer_user_workloads_secret.example projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsSecrets/{{name}} $ terraform import google_composer_user_workloads_secret.example {{project}}/{{region}}/{{environment}}/{{name}} -$ terraform import google_composer_user_workloads_secret.example {{name}} +$ terraform import google_composer_user_workloads_secret.example {{environment}}/{{name}} ``` diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index 9c7c41e1422f..218fe12cdf8e 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -428,17 +428,11 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `instance_termination_action` - (Optional) Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) -* `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Only support `DELETE` `instance_termination_action` at this point. Structure is [documented below](#nested_max_run_duration). -The `max_run_duration` block supports: +* `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Structure is [documented below](#nested_max_run_duration). 
-* `nanos` - (Optional) Span of time that's a fraction of a second at nanosecond - resolution. Durations less than one second are represented with a 0 - `seconds` field and a positive `nanos` field. Must be from 0 to - 999,999,999 inclusive. -* `seconds` - (Required) Span of time at a resolution of a second. Must be from 0 to - 315,576,000,000 inclusive. Note: these bounds are computed from: 60 - sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. +* `on_instance_stop_action` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the action to be performed when the instance is terminated using `max_run_duration` and `STOP` `instance_termination_action`. Only support `true` `discard_local_ssd` at this point. Structure is [documented below](#nested_on_instance_stop_action). + * `maintenance_interval` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. @@ -454,6 +448,21 @@ specified, then this instance will have no external IPv6 Internet access. Struct 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. +The `max_run_duration` block supports: + +* `nanos` - (Optional) Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented with a 0 + `seconds` field and a positive `nanos` field. Must be from 0 to + 999,999,999 inclusive. + +* `seconds` - (Required) Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. Note: these bounds are computed from: 60 + sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. 
+ +The `on_instance_stop_action` block supports: + +* `discard_local_ssd` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Whether to discard local SSDs attached to the VM while terminating using `max_run_duration`. Only supports `true` at this point. + The `guest_accelerator` block supports: * `type` (Required) - The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index 8700dbdffaf0..d0f6f6c16eb2 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -587,8 +587,14 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `instance_termination_action` - (Optional) Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) -* `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Only support `DELETE` `instance_termination_action` at this point. Structure is [documented below](#nested_max_run_duration). -The `max_run_duration` block supports: +* `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Structure is [documented below](#nested_max_run_duration). 
+ +* `on_instance_stop_action` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the action to be performed when the instance is terminated using `max_run_duration` and `STOP` `instance_termination_action`. Only support `true` `discard_local_ssd` at this point. Structure is [documented below](#nested_on_instance_stop_action). + +* `maintenance_interval` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. + +* `local_ssd_recovery_timeout` - (Optional) (https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the maximum amount of time a Local Ssd Vm should wait while recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour. Structure is [documented below](#nested_local_ssd_recovery_timeout). +The `local_ssd_recovery_timeout` block supports: * `nanos` - (Optional) Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 @@ -599,20 +605,21 @@ specified, then this instance will have no external IPv6 Internet access. Struct 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. -* `maintenance_interval` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. - -* `local_ssd_recovery_timeout` - (Optional) (https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the maximum amount of time a Local Ssd Vm should wait while recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour. 
Structure is [documented below](#nested_local_ssd_recovery_timeout). -The `local_ssd_recovery_timeout` block supports: +The `max_run_duration` block supports: * `nanos` - (Optional) Span of time that's a fraction of a second at nanosecond - resolution. Durations less than one second are represented with a 0 - `seconds` field and a positive `nanos` field. Must be from 0 to - 999,999,999 inclusive. + resolution. Durations less than one second are represented with a 0 + `seconds` field and a positive `nanos` field. Must be from 0 to + 999,999,999 inclusive. * `seconds` - (Required) Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. +The `on_instance_stop_action` block supports: + +* `discard_local_ssd` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Whether to discard local SSDs attached to the VM while terminating using `max_run_duration`. Only supports `true` at this point. + The `guest_accelerator` block supports: * `type` (Required) - The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`. 
diff --git a/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json b/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json new file mode 100644 index 000000000000..3cb342f850e4 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json @@ -0,0 +1,41 @@ +[ + { + "name":"//alloydb.googleapis.com/alloydb-cluster/instances/alloydb-instance", + "asset_type":"alloydb.googleapis.com/Instance", + "resource":{ + "version":"v1beta", + "discovery_document_uri":"https://www.googleapis.com/discovery/v1/apis/alloydb/v1beta/rest", + "discovery_name":"Instance", + "parent":"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data":{ + "instanceType":"PRIMARY", + "machineConfig":{ + "cpuCount":2 + } + } + }, + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] + + + }, + { + "name":"//alloydb.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/clusters/alloydb-cluster", + "asset_type":"alloydb.googleapis.com/Cluster", + "resource":{ + "version":"v1beta", + "discovery_document_uri":"https://www.googleapis.com/discovery/v1/apis/alloydb/v1beta/rest", + "discovery_name":"Cluster", + "parent":"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data":{ + "clusterType":"PRIMARY", + "initialUser":{ + "password":"alloydb-cluster" + }, + "network":"default" + } + }, + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] + } +] diff --git a/mmv1/third_party/tgc/tests/data/example_alloydb_instance.tf b/mmv1/third_party/tgc/tests/data/example_alloydb_instance.tf new file mode 100644 index 000000000000..3295f6ecaa50 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_alloydb_instance.tf @@ -0,0 +1,33 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if 
.Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + + +resource "google_alloydb_cluster" "default" { + cluster_id = "alloydb-cluster" + location = "us-central1" + network = "default" + + initial_user { + password = "alloydb-cluster" + } +} + +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.cluster_id + instance_id = "alloydb-instance" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_datastream_private_connection.json b/mmv1/third_party/tgc/tests/data/example_google_datastream_private_connection.json new file mode 100644 index 000000000000..0e5276318617 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_datastream_private_connection.json @@ -0,0 +1,40 @@ +[ + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/global/networks/pc-network", + "asset_type": "compute.googleapis.com/Network", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "Network", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "autoCreateSubnetworks": true, + "name": "pc-network", + "networkFirewallPolicyEnforcementOrder": "AFTER_CLASSIC_FIREWALL" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//datastream.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/privateConnections/pc-connection", + "asset_type": "datastream.googleapis.com/PrivateConnection", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/datastream/v1/rest", + "discovery_name": "PrivateConnection", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "displayName": "Connection profile", + 
"labels": { + "key": "value" + }, + "vpcPeeringConfig": { + "subnet": "10.0.0.0/29" + } + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_datastream_private_connection.tf b/mmv1/third_party/tgc/tests/data/example_google_datastream_private_connection.tf new file mode 100644 index 000000000000..5bae95941921 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_datastream_private_connection.tf @@ -0,0 +1,31 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_datastream_private_connection" "default" { + display_name = "Connection profile" + location = "us-central1" + private_connection_id = "pc-connection" + + labels = { + key = "value" + } + + vpc_peering_config { + vpc = google_compute_network.default.id + subnet = "10.0.0.0/29" + } +} + +resource "google_compute_network" "default" { + name = "pc-network" +} \ No newline at end of file