diff --git a/.ci/magician/cmd/membership_checker.go b/.ci/magician/cmd/membership_checker.go index bc37d0c7234d..4af9c95b81bc 100644 --- a/.ci/magician/cmd/membership_checker.go +++ b/.ci/magician/cmd/membership_checker.go @@ -97,41 +97,6 @@ func execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBra authorUserType := gh.GetUserType(author) trusted := authorUserType == github.CoreContributorUserType || authorUserType == github.GooglerUserType - if authorUserType != github.CoreContributorUserType { - fmt.Println("Not core contributor - assigning reviewer") - - requestedReviewers, err := gh.GetPullRequestRequestedReviewers(prNumber) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - previousReviewers, err := gh.GetPullRequestPreviousReviewers(prNumber) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - reviewersToRequest, newPrimaryReviewer := github.ChooseCoreReviewers(requestedReviewers, previousReviewers) - - for _, reviewer := range reviewersToRequest { - err = gh.RequestPullRequestReviewer(prNumber, reviewer) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } - - if newPrimaryReviewer != "" { - comment := github.FormatReviewerComment(newPrimaryReviewer, authorUserType, trusted) - err = gh.PostComment(prNumber, comment) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } - } - // auto_run(contributor-membership-checker) will be run on every commit or /gcbrun: // only triggers builds for trusted users if trusted { diff --git a/.ci/magician/cmd/membership_checker_test.go b/.ci/magician/cmd/membership_checker_test.go index 93b3805d29f4..a3a40b4b89bd 100644 --- a/.ci/magician/cmd/membership_checker_test.go +++ b/.ci/magician/cmd/membership_checker_test.go @@ -18,7 +18,6 @@ package cmd import ( "magician/github" "reflect" - "regexp" "testing" ) @@ -78,20 +77,7 @@ func TestExecMembershipChecker_GooglerFlow(t *testing.T) { execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) - method := 
"RequestPullRequestReviewer" - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("Review wasn't requested for googler") - } else if len(calls) != 1 { - t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) - } else if params := calls[0]; len(params) != 2 { - t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) - } else if param := params[0]; param != "pr1" { - t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) - } else if param := params[1]; !github.IsTeamReviewer(param.(string)) { - t.Fatalf("Wrong second param for %s, got %v, expected a team reviewer", method, param) - } - - method = "TriggerMMPresubmitRuns" + method := "TriggerMMPresubmitRuns" expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", "_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} if calls, ok := cb.calledMethods[method]; !ok { t.Fatal("Presubmit runs not triggered for googler") @@ -126,20 +112,7 @@ func TestExecMembershipChecker_AmbiguousUserFlow(t *testing.T) { execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) - method := "RequestPullRequestReviewer" - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("Review wasn't requested for ambiguous user") - } else if len(calls) != 1 { - t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) - } else if params := calls[0]; len(params) != 2 { - t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) - } else if param := params[0]; param != "pr1" { - t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) - } else if param := params[1]; !github.IsTeamReviewer(param.(string)) { - t.Fatalf("Wrong second param for %s, got %v, expected a team reviewer", method, param) - } - - method = "AddLabel" + method := "AddLabel" expected := [][]any{{"pr1", "awaiting-approval"}} if calls, ok := 
gh.calledMethods[method]; !ok { t.Fatal("Label wasn't posted to pull request") @@ -181,33 +154,4 @@ func TestExecMembershipChecker_CommentForNewPrimaryReviewer(t *testing.T) { } execMembershipChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) - - method := "RequestPullRequestReviewer" - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("Review wasn't requested for googler") - } else if len(calls) != 1 { - t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) - } else if params := calls[0]; len(params) != 2 { - t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) - } else if param := params[0]; param != "pr1" { - t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) - } else if param := params[1]; !github.IsTeamReviewer(param.(string)) { - t.Fatalf("Wrong second param for %s, got %v, expected a team reviewer", method, param) - } - - method = "PostComment" - reviewerExp := regexp.MustCompile(`@(.*?),`) - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("Comment wasn't posted stating user status") - } else if len(calls) != 1 { - t.Fatalf("Wrong number of calls for %s, got %d, expected 1", method, len(calls)) - } else if params := calls[0]; len(params) != 2 { - t.Fatalf("Wrong number of params for %s, got %d, expected 2", method, len(params)) - } else if param := params[0]; param != "pr1" { - t.Fatalf("Wrong first param for %s, got %v, expected pr1", method, param) - } else if param, ok := params[1].(string); !ok { - t.Fatalf("Got non-string second param for %s", method) - } else if submatches := reviewerExp.FindStringSubmatch(param); len(submatches) != 2 || !github.IsTeamReviewer(submatches[1]) { - t.Fatalf("%s called without a team reviewer (found %v) in the comment: %s", method, submatches, param) - } } diff --git a/.ci/magician/cmd/request_reviewer.go b/.ci/magician/cmd/request_reviewer.go new file mode 100644 index 000000000000..b5deed4e828e --- /dev/null 
+++ b/.ci/magician/cmd/request_reviewer.go @@ -0,0 +1,102 @@ +/* +* Copyright 2024 Google LLC. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +package cmd + +import ( + "fmt" + "magician/github" + "os" + + "github.com/spf13/cobra" +) + +// requestReviewerCmd represents the requestReviewer command +var requestReviewerCmd = &cobra.Command{ + Use: "request-reviewer", + Short: "Assigns and re-requests reviewers", + Long: `This command automatically requests (or re-requests) core contributor reviews for a PR based on whether the user is a core contributor. + + The command expects the following argument: + 1. PR Number + + Only the PR number is required as an argument; all + other pull request details (such as the author and the + requested and previous reviewers) are fetched from the + GitHub API using the PR number. + + It then performs the following operations: + 1. Determines the author of the pull request + 2. If the author is not a core contributor: + a. Identifies the initially requested reviewer and those who previously reviewed this PR. + b. Determines and requests reviewers based on the above. + c. As appropriate, posts a welcome comment on the PR. 
+ `, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + prNumber := args[0] + fmt.Println("PR Number: ", prNumber) + gh := github.NewClient() + execRequestReviewer(prNumber, gh) + }, +} + +func execRequestReviewer(prNumber string, gh GithubClient) { + pullRequest, err := gh.GetPullRequest(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + author := pullRequest.User.Login + if !github.IsCoreContributor(author) { + fmt.Println("Not core contributor - assigning reviewer") + + requestedReviewers, err := gh.GetPullRequestRequestedReviewers(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + previousReviewers, err := gh.GetPullRequestPreviousReviewers(prNumber) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + reviewersToRequest, newPrimaryReviewer := github.ChooseCoreReviewers(requestedReviewers, previousReviewers) + + for _, reviewer := range reviewersToRequest { + err = gh.RequestPullRequestReviewer(prNumber, reviewer) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + + if newPrimaryReviewer != "" { + comment := github.FormatReviewerComment(newPrimaryReviewer) + err = gh.PostComment(prNumber, comment) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + } +} + +func init() { + rootCmd.AddCommand(requestReviewerCmd) +} diff --git a/.ci/magician/cmd/request_reviewer_test.go b/.ci/magician/cmd/request_reviewer_test.go new file mode 100644 index 000000000000..d3daa08960f4 --- /dev/null +++ b/.ci/magician/cmd/request_reviewer_test.go @@ -0,0 +1,103 @@ +/* +* Copyright 2024 Google LLC. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package cmd + +import ( + "github.com/stretchr/testify/assert" + "magician/github" + "testing" +) + +func TestExecRequestReviewer(t *testing.T) { + availableReviewers := github.AvailableReviewers() + cases := map[string]struct { + pullRequest github.PullRequest + requestedReviewers []string + previousReviewers []string + teamMembers map[string][]string + expectSpecificReviewers []string + expectReviewersFromList []string + }{ + "core contributor author doesn't get a new reviewer, re-request, or comment with no previous reviewers": { + pullRequest: github.PullRequest{ + User: github.User{Login: availableReviewers[0]}, + }, + expectSpecificReviewers: []string{}, + }, + "core contributor author doesn't get a new reviewer, re-request, or comment with previous reviewers": { + pullRequest: github.PullRequest{ + User: github.User{Login: availableReviewers[0]}, + }, + previousReviewers: []string{availableReviewers[1]}, + expectSpecificReviewers: []string{}, + }, + "non-core-contributor author gets a new reviewer with no previous reviewers": { + pullRequest: github.PullRequest{ + User: github.User{Login: "author"}, + }, + expectReviewersFromList: availableReviewers, + }, + "non-core-contributor author doesn't get a new reviewer (but does get re-request) with previous reviewers": { + pullRequest: github.PullRequest{ + User: github.User{Login: "author"}, + }, + previousReviewers: []string{availableReviewers[1], "author2", availableReviewers[2]}, + expectSpecificReviewers: []string{availableReviewers[1], availableReviewers[2]}, + }, + "non-core-contributor 
author doesn't get a new reviewer or a re-request with already-requested reviewers": { + pullRequest: github.PullRequest{ + User: github.User{Login: "author"}, + }, + requestedReviewers: []string{availableReviewers[1], "author2", availableReviewers[2]}, + expectSpecificReviewers: []string{}, + }, + } + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + requestedReviewers := []github.User{} + for _, login := range tc.requestedReviewers { + requestedReviewers = append(requestedReviewers, github.User{Login: login}) + } + previousReviewers := []github.User{} + for _, login := range tc.previousReviewers { + previousReviewers = append(previousReviewers, github.User{Login: login}) + } + gh := &mockGithub{ + pullRequest: tc.pullRequest, + requestedReviewers: requestedReviewers, + previousReviewers: previousReviewers, + calledMethods: make(map[string][][]any), + } + + execRequestReviewer("1", gh) + + actualReviewers := []string{} + for _, args := range gh.calledMethods["RequestPullRequestReviewer"] { + actualReviewers = append(actualReviewers, args[1].(string)) + } + + if tc.expectSpecificReviewers != nil { + assert.ElementsMatch(t, tc.expectSpecificReviewers, actualReviewers) + } + if tc.expectReviewersFromList != nil { + for _, reviewer := range actualReviewers { + assert.Contains(t, tc.expectReviewersFromList, reviewer) + } + } + }) + } +} diff --git a/.ci/magician/cmd/test_terraform_vcr.go b/.ci/magician/cmd/test_terraform_vcr.go index 99ca6cd74a81..2749ddd586c0 100644 --- a/.ci/magician/cmd/test_terraform_vcr.go +++ b/.ci/magician/cmd/test_terraform_vcr.go @@ -168,6 +168,8 @@ Affected tests: ` + fmt.Sprintf("`%d`", len(replayingResult.FailedTests)) + ` recordingResult, recordingErr := vt.RunParallel(vcr.Recording, provider.Beta, testDirs, replayingResult.FailedTests) if recordingErr != nil { testState = "failure" + } else { + testState = "success" } if err := vt.UploadCassettes("ci-vcr-cassettes", prNumber, provider.Beta); err != nil { @@ -200,7 +202,7 @@ 
Affected tests: ` + fmt.Sprintf("`%d`", len(replayingResult.FailedTests)) + ` testState = "failure" } - if err := vt.UploadLogs("ci-vcr-logs", prNumber, buildID, true, false, vcr.Recording, provider.Beta); err != nil { + if err := vt.UploadLogs("ci-vcr-logs", prNumber, buildID, true, true, vcr.Replaying, provider.Beta); err != nil { fmt.Println("Error uploading recording logs: ", err) os.Exit(1) } diff --git a/.ci/magician/github/REVIEWER_ASSIGNMENT_COMMENT.md b/.ci/magician/github/REVIEWER_ASSIGNMENT_COMMENT.md index 09d5bfc62936..2678f295947d 100644 --- a/.ci/magician/github/REVIEWER_ASSIGNMENT_COMMENT.md +++ b/.ci/magician/github/REVIEWER_ASSIGNMENT_COMMENT.md @@ -1,4 +1,4 @@ -Hello! I am a robot. It looks like you are a: {{if eq .authorUserType "Community Contributor"}}Community Contributor{{else}}~Community Contributor~{{end}} {{if eq .authorUserType "Googler"}}Googler{{else}}~Googler~{{end}} {{if eq .authorUserType "Core Contributor"}}Core Contributor{{else}}~Core Contributor~{{end}}. {{if .trusted}}Tests will run automatically.{{else}}Tests will require approval to run.{{end}} +Hello! I am a robot. Tests will require approval from a repository maintainer to run. @{{.reviewer}}, a repository maintainer, has been assigned to review your changes. If you have not received review feedback within 2 business days, please leave a comment on this PR asking them to take a look. diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index 0f41990a121c..fd151935d98b 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -39,12 +39,11 @@ var ( "trodge", "hao-nan-li", "NickElliot", + "BBBmau", } // This is for new team members who are onboarding - trustedContributors = []string{ - "BBBmau", - } + trustedContributors = []string{} // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. 
onVacationReviewers = []string{ @@ -74,8 +73,8 @@ func (ut UserType) String() string { } func (gh *Client) GetUserType(user string) UserType { - if isTeamMember(user, gh.token) { - fmt.Println("User is a team member") + if IsCoreContributor(user) { + fmt.Println("User is a core contributor") return CoreContributorUserType } @@ -93,11 +92,11 @@ func (gh *Client) GetUserType(user string) UserType { } // Check if a user is team member to not request a random reviewer -func isTeamMember(author, githubToken string) bool { - return slices.Contains(reviewerRotation, author) || slices.Contains(trustedContributors, author) +func IsCoreContributor(user string) bool { + return slices.Contains(reviewerRotation, user) || slices.Contains(trustedContributors, user) } -func IsTeamReviewer(reviewer string) bool { +func IsCoreReviewer(reviewer string) bool { return slices.Contains(reviewerRotation, reviewer) } @@ -112,8 +111,12 @@ func isOrgMember(author, org, githubToken string) bool { } func GetRandomReviewer() string { - availableReviewers := utils.Removes(reviewerRotation, onVacationReviewers) + availableReviewers := AvailableReviewers() rand.Seed(time.Now().UnixNano()) reviewer := availableReviewers[rand.Intn(len(availableReviewers))] return reviewer } + +func AvailableReviewers() []string { + return utils.Removes(reviewerRotation, onVacationReviewers) +} diff --git a/.ci/magician/github/reviewer_assignment.go b/.ci/magician/github/reviewer_assignment.go index 23651a15b1a0..d06415bab810 100644 --- a/.ci/magician/github/reviewer_assignment.go +++ b/.ci/magician/github/reviewer_assignment.go @@ -34,14 +34,14 @@ func ChooseCoreReviewers(requestedReviewers, previousReviewers []User) (reviewer newPrimaryReviewer = "" for _, reviewer := range requestedReviewers { - if IsTeamReviewer(reviewer.Login) { + if IsCoreReviewer(reviewer.Login) { hasPrimaryReviewer = true break } } for _, reviewer := range previousReviewers { - if IsTeamReviewer(reviewer.Login) { + if 
IsCoreReviewer(reviewer.Login) { hasPrimaryReviewer = true reviewersToRequest = append(reviewersToRequest, reviewer.Login) } @@ -55,16 +55,14 @@ func ChooseCoreReviewers(requestedReviewers, previousReviewers []User) (reviewer return reviewersToRequest, newPrimaryReviewer } -func FormatReviewerComment(newPrimaryReviewer string, authorUserType UserType, trusted bool) string { +func FormatReviewerComment(newPrimaryReviewer string) string { tmpl, err := template.New("REVIEWER_ASSIGNMENT_COMMENT.md").Parse(reviewerAssignmentComment) if err != nil { panic(fmt.Sprintf("Unable to parse REVIEWER_ASSIGNMENT_COMMENT.md: %s", err)) } sb := new(strings.Builder) tmpl.Execute(sb, map[string]any{ - "reviewer": newPrimaryReviewer, - "authorUserType": authorUserType.String(), - "trusted": trusted, + "reviewer": newPrimaryReviewer, }) return sb.String() } diff --git a/.ci/magician/github/reviewer_assignment_test.go b/.ci/magician/github/reviewer_assignment_test.go index 252fc4d61186..6247891fd7db 100644 --- a/.ci/magician/github/reviewer_assignment_test.go +++ b/.ci/magician/github/reviewer_assignment_test.go @@ -127,27 +127,12 @@ func TestFormatReviewerComment(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - comment := FormatReviewerComment(tc.Reviewer, tc.AuthorUserType, tc.Trusted) + comment := FormatReviewerComment(tc.Reviewer) t.Log(comment) if !strings.Contains(comment, fmt.Sprintf("@%s", tc.Reviewer)) { t.Errorf("wanted comment to contain @%s; does not.", tc.Reviewer) } - if !strings.Contains(comment, tc.AuthorUserType.String()) { - t.Errorf("wanted comment to contain user type (%s); does not.", tc.AuthorUserType.String()) - } - if strings.Contains(comment, fmt.Sprintf("~%s~", tc.AuthorUserType.String())) { - t.Errorf("wanted user type (%s) in comment to not be crossed out, but it is", tc.AuthorUserType.String()) - } - for _, ut := range []UserType{CommunityUserType, GooglerUserType, CoreContributorUserType} { - if ut != tc.AuthorUserType && 
!strings.Contains(comment, fmt.Sprintf("~%s~", ut.String())) { - t.Errorf("wanted other user type (%s) in comment to be crossed out, but it is not", ut) - } - } - - if tc.Trusted && !strings.Contains(comment, "Tests will run automatically") { - t.Errorf("wanted comment to say tests will run automatically; does not") - } - if !tc.Trusted && !strings.Contains(comment, "Tests will require approval") { + if !strings.Contains(comment, "Tests will require approval") { t.Errorf("wanted comment to say tests will require approval; does not") } }) diff --git a/.github/workflows/request-reviewer.yml b/.github/workflows/request-reviewer.yml new file mode 100644 index 000000000000..2831ec975caa --- /dev/null +++ b/.github/workflows/request-reviewer.yml @@ -0,0 +1,40 @@ +name: request-reviewer + +permissions: read-all + +on: + pull_request_target: + types: + - edited + - opened + - ready_for_review + - reopened + - synchronize + branches: + - 'main' + - 'FEATURE-BRANCH-*' + +jobs: + request-review: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + permissions: + pull-requests: write + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '^1.20' + # Disable caching for now due to issues with large provider dependency caches + cache: false + - name: Build magician + run: | + cd .ci/magician + go build . 
+ - name: Request reviewer + run: .ci/magician/magician request-reviewer ${{ github.event.pull_request.number }} + diff --git a/.github/workflows/teamcity-pr-checks.yml b/.github/workflows/teamcity-pr-checks.yml new file mode 100644 index 000000000000..6a576185e912 --- /dev/null +++ b/.github/workflows/teamcity-pr-checks.yml @@ -0,0 +1,46 @@ +name: TeamCity Configuration Tests +permissions: read-all + +on: + workflow_dispatch: + pull_request: + paths: + - '.github/workflows/teamcity-pr-checks.yml' + - 'mmv1/third_party/terraform/.teamcity/**' + + +jobs: + teamcity-config-tests: + runs-on: ubuntu-22.04 + steps: + - name: Checkout Repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + + - name: Setup Java + uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0 + with: + distribution: zulu + java-version: 17 + java-package: jdk + + - name: Cache Maven files + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + + - name: Download TeamCity Tools + run : | + cd mmv1/third_party/terraform/.teamcity + make tools + + # Running the tests twice ensures that the tests are discovered and run + - name: Run TeamCity Tests + run : | + cd mmv1/third_party/terraform/.teamcity + make test + make test \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS index 5528c7612e1b..e69de29bb2d1 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +0,0 @@ -* @GoogleCloudPlatform/terraform-team diff --git a/docs/content/develop/field-reference.md b/docs/content/develop/field-reference.md index 6e292e9e39a6..14d2e03b1af0 100644 --- a/docs/content/develop/field-reference.md +++ b/docs/content/develop/field-reference.md @@ -81,7 +81,7 @@ state. See for more information. Sensitive fields are often not returned by the API (because they are sensitive). 
-In this case, the field will also need to use [`ignore_read` or a `custom_flatten` function]({{< ref "/develop/field-reference#ignore_read" >}}). +In this case, the field will also need to use [`ignore_read` or a `custom_flatten` function]({{< ref "/develop/permadiff#ignore_read" >}}). Example: @@ -212,6 +212,20 @@ Example: - nested_object.0.nested_field ``` +### `diff_suppress_func` +Specifies the name of a [diff suppress function](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#diffsuppressfunc) +to use for this field. In many cases, a [custom flattener](https://googlecloudplatform.github.io/magic-modules/develop/custom-code/#custom_flatten) +is preferred because it will allow the user to see a clearer diff when the field actually is being changed. See +[Fix a permadiff]({{< ref "/develop/permadiff.md" >}}) for more information and best practices. + +Example: + +```yaml +- !ruby/object:Api::Type::String + name: 'fieldOne' + diff_suppress_func: 'tpgresource.CaseDiffSuppress' +``` + ## `Enum` properties ### `values` diff --git a/docs/content/develop/permadiff.md b/docs/content/develop/permadiff.md new file mode 100644 index 000000000000..96a6adf0739e --- /dev/null +++ b/docs/content/develop/permadiff.md @@ -0,0 +1,263 @@ +--- +title: "Fix a permadiff" +weight: 60 +--- + +# Fix a permadiff + +Permadiffs are an extremely common class of errors that users experience. They manifest as diffs at plan time on fields that a user has not modified in their configuration. They can also show up as test failures with the error message: "After applying this test step, the plan was not empty." + +In a general sense, permadiffs are caused by the API returning a different value for the field than what the user sent, which causes Terraform to try to re-send the same request, which gets the same response, which continues to result in the user seeing a diff. 
In general, APIs that return exactly what the user sent are more friendly for Terraform or other declarative tooling. However, many GCP APIs normalize inputs, have server-side defaults that are returned to the user, do not return all the fields set on a resource, or return data in a different format in some other way. + +This page outlines best practices for working around various types of permadiffs in the `google` and `google-beta` providers. + +## API returns default value for unset field {#default} + +For new fields, if possible, set a client-side default that matches the API default. This will prevent the diff and will allow users to accurately see what the end state will be if the field is not set in their configuration. A client-side default should only be used if the API sets the same default value in all cases and the default value will be stable over time. Changing a client-side default is a [breaking change]({{< ref "/develop/breaking-changes/breaking-changes" >}}). + +{{< tabs "default_value" >}} +{{< tab "MMv1" >}} +```yaml +default_value: DEFAULT_VALUE +``` + +In the providers, this will be converted to: + +```go +"field": { + // ... + Default: "DEFAULT_VALUE", +} +``` + +See [SDKv2 Schema Behaviors - Default ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#default) for more information. +{{< /tab >}} +{{< tab "Handwritten" >}} +```go +"field": { + // ... + Default: "DEFAULT_VALUE", +} +``` + +See [SDKv2 Schema Behaviors - Default ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#default) for more information. +{{< /tab >}} +{{< /tabs >}} + +For existing fields (or new fields that are not eligible for a client-side default), mark the field as having an API-side default. 
If the field is not set (or is set to an "empty" value such as zero, false, or an empty string) the provider will treat the most recent value returned by the API as the value for the field, and will send that value for the field on subsequent requests. The field will show as `(known after apply)` in plans and it will not be possible for the user to explicitly set the field to an "empty" value. + +{{< tabs "default_from_api" >}} +{{< tab "MMv1" >}} +```yaml +default_from_api: true +``` + +In the providers, this will be converted to: + +```go +"field": { + // ... + Optional: true, + Computed: true, +} +``` + +See [SDKv2 Schema Behaviors - Optional ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#optional) and [SDKv2 Schema Behaviors - Computed ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#computed) for more information. +{{< /tab >}} +{{< tab "Handwritten" >}} +```go +"field": { + // ... + Optional: true, + Computed: true, +} +``` + +See [SDKv2 Schema Behaviors - Optional ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#optional) and [SDKv2 Schema Behaviors - Computed ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#computed) for more information. +{{< /tab >}} +{{< /tabs >}} + +## API returns an empty value if default value is sent {#default_if_empty} + +Use a flattener to store the default value in state if the response has an empty (or unset) value. + +{{< tabs "default_if_empty" >}} +{{< tab "MMv1" >}} +Use the standard `default_if_empty` flattener. 
+ +```yaml +custom_flatten: 'templates/terraform/custom_flatten/default_if_empty.erb' +``` +{{< /tab >}} +{{< tab "Handwritten" >}} +```go +func flattenResourceNameFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "DEFAULT_VALUE" + } + // Any other necessary logic goes here. + return v +} +``` +{{< /tab >}} +{{< /tabs >}} + +## API normalizes a value {#normalized-value} + +In cases where the API normalizes and returns a value in a simple, predictable way (such as capitalizing the value) add a diff suppress function for the field to suppress the diff. + +The `tpgresource` package in each provider supplies diff suppress functions for the following common cases: + +- `tpgresource.CaseDiffSuppress`: Suppress diffs from capitalization differences between the user's configuration and the API. +- `tpgresource.DurationDiffSuppress`: Suppress diffs from duration format differences such as "60.0s" vs "60s". This is necessary for [`Duration`](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration) API fields. +- `tpgresource.ProjectNumberDiffSuppress`: Suppress diffs caused by the provider sending a project ID and the API returning a project number. + + +{{< tabs "diff_suppress_simple" >}} + +{{< tab "MMv1" >}} +```yaml +# Use a built-in function +diff_suppress_func: tpgresource.CaseDiffSuppress + +# Reference a resource-specific function +diff_suppress_func: resourceNameFieldNameDiffSuppress +``` + +Define resource-specific functions in a [`custom_code.constants`](https://googlecloudplatform.github.io/magic-modules/develop/custom-code/#add-reusable-variables-and-functions) file. 
+ +```go +func resourceNameFieldNameDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + // Separate function for easier unit testing + return resourceNameFieldNameDiffSuppressLogic(old, new) +} + +func resourceNameFieldNameDiffSuppressLogic(old, new string) bool { + // Diff suppression logic. Returns true if the diff should be suppressed - that is, if the + // old and new values should be considered "the same". +} +``` + +See [SDKv2 Schema Behaviors - DiffSuppressFunc ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#diffsuppressfunc) for more information. +{{< /tab >}} +{{< tab "Handwritten" >}} +Define resource-specific functions in your service package, for example at the top of the related resource file. + +```go +func resourceNameFieldNameDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + // Separate function for easier unit testing + return resourceNameFieldNameDiffSuppressLogic(old, new) +} + +func resourceNameFieldNameDiffSuppressLogic(old, new string) bool { + // Diff suppression logic. Returns true if the diff should be suppressed - that is, if the + // old and new values should be considered "the same". +} +``` + +Reference diff suppress functions from the field definition. + +```go +"field": { + // ... + DiffSuppressFunc: resourceNameFieldNameDiffSuppress, +} +``` + +See [SDKv2 Schema Behaviors - DiffSuppressFunc ↗](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#diffsuppressfunc) for more information. +{{< /tab >}} +{{< /tabs >}} + +## API field that is never included in the response {#ignore_read} + +This is common for fields that store credentials or similar information. Such fields should also be marked as [`sensitive`]({{< ref "/develop/field-reference#sensitive" >}}). + +In the flattener for the field, return the value of the field in the user's configuration. 
+ +{{< tabs "ignore_read" >}} +{{< tab "MMv1" >}} +On top-level fields, this can be done with: + +```yaml +ignore_read: true +``` + +For nested fields, `ignore_read` is [not currently supported](https://github.com/hashicorp/terraform-provider-google/issues/12410), so this must be implemented with a [custom flattener]({{< ref "/develop/custom-code#custom_flatten" >}}). You will also need to add the field to `ignore_read_extra` on any examples that are used to generate tests; this will cause tests to ignore the field when checking that the values in the API match the user's configuration. + +```go +func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // We want to ignore read on this field, but cannot because it is nested + return d.Get("path.0.to.0.nested.0.field") +} +``` + +```yaml +examples: + - !ruby/object:Provider::Terraform::Examples + # example configuration + ignore_read_extra: + - "path.0.to.0.nested.0.field" +``` +{{< /tab >}} +{{< tab "Handwritten" >}} +Use `d.Get` to set the flattened value to be the same as the user-configured value (instead of a value from the API). + +```go +func flattenParentField(d *schema.ResourceData, disk *compute.AttachedDisk, config *transport_tpg.Config) []map[string]interface{} { + result := map[string]interface{}{ + "nested_field": d.Get("path.0.to.0.parent_field.0.nested_field") + } + return []map[string]interface{}{result} +} +``` + +In tests, add the field to `ImportStateVerifyIgnore` on any relevant import steps. 
+ +``` +{ + ResourceName: "google_product_resource.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"path.0.to.0.parent_field.0.nested_field"}, +}, +``` +{{< /tab >}} +{{< /tabs >}} + +## API returns a list in a different order than was sent {#list-order} + +For an Array of nested objects, convert it to a Set – this is a [breaking change]({{< ref "/develop/breaking-changes/breaking-changes" >}}) and can only happen in a major release. + +For an Array of simple values (such as strings or ints), rewrite the value in the flattener to match the order in the user's configuration. This will also simplify diffs if new values are added or removed. + +{{< tabs "diff_suppress_list" >}} + +{{< tab "MMv1" >}} +Add a [custom flattener]({{< ref "/develop/custom-code#custom_flatten" >}}) for the field. + +```go +func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + configValue := d.Get("path.0.to.0.parent_field.0.nested_field").([]string) + + ret := []string{} + // Add values from v to ret to match order in configValue and put any new strings at the end + + return ret +} +``` +{{< /tab >}} +{{< tab "Handwritten" >}} +Define resource-specific functions in your service package, for example at the top of the related resource file. 
+ +```go +func flattenResourceNameFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + configValue := d.Get("path.0.to.0.parent_field.0.nested_field").([]string) + + ret := []string{} + // Add values from v to ret to match order in configValue and put any new strings at the end + + return ret +} +``` +{{< /tab >}} +{{< /tabs >}} diff --git a/mmv1/products/clouddeploy/CustomTargetType.yaml b/mmv1/products/clouddeploy/CustomTargetType.yaml new file mode 100644 index 000000000000..d805ae20bbbd --- /dev/null +++ b/mmv1/products/clouddeploy/CustomTargetType.yaml @@ -0,0 +1,160 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: "CustomTargetType" +description: | + A Cloud Deploy `CustomTargetType` defines a type of custom target that can be referenced in a + Cloud Deploy `Target` in order to facilitate deploying to other systems besides the supported runtimes. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + "Define and use a custom target type": "https://cloud.google.com/deploy/docs/deploy-app-custom-target" + api: "https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.customTargetTypes" +base_url: "projects/{{project}}/locations/{{location}}/customTargetTypes" +self_link: "projects/{{project}}/locations/{{location}}/customTargetTypes/{{name}}" +create_url: "projects/{{project}}/locations/{{location}}/customTargetTypes?customTargetTypeId={{name}}" +update_verb: :PATCH +update_mask: true +autogen_async: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: "name" + base_url: "{{op_id}}" + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: "response" + resource_inside_response: false + status: !ruby/object:Api::OpAsync::Status + path: "done" + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: "error" + message: "message" +id_format: "projects/{{project}}/locations/{{location}}/customTargetTypes/{{name}}" +import_format: + [ + 'projects/{{project}}/locations/{{location}}/customTargetTypes/{{name}}' + ] +examples: + - !ruby/object:Provider::Terraform::Examples + name: "clouddeploy_custom_target_type_basic" + primary_resource_id: "custom-target-type" + vars: + custom_target_type_name: "my-custom-target-type" + - !ruby/object:Provider::Terraform::Examples + name: "clouddeploy_custom_target_type_git_skaffold_modules" + primary_resource_id: "custom-target-type" + vars: + custom_target_type_name: "my-custom-target-type" + - !ruby/object:Provider::Terraform::Examples + name: "clouddeploy_custom_target_type_gcs_skaffold_modules" + primary_resource_id: "custom-target-type" + vars: + custom_target_type_name: "my-custom-target-type" +parameters: + - !ruby/object:Api::Type::String + name: "location" + required: true + immutable: true + url_param_only: true + description: "The location of the 
source." +properties: + - !ruby/object:Api::Type::String + name: "name" + description: "Name of the `CustomTargetType`." + required: true + immutable: true + url_param_only: true + - !ruby/object:Api::Type::String + name: "customTargetTypeId" + description: "Resource id of the `CustomTargetType`." + output: true + - !ruby/object:Api::Type::String + name: "uid" + description: "Unique identifier of the `CustomTargetType`." + output: true + - !ruby/object:Api::Type::String + name: "description" + description: "Description of the `CustomTargetType`. Max length is 255 characters." + - !ruby/object:Api::Type::KeyValueAnnotations + name: "annotations" + description: "User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations." + - !ruby/object:Api::Type::KeyValueLabels + name: "labels" + description: "Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes." + - !ruby/object:Api::Type::String + name: "createTime" + description: "Time at which the `CustomTargetType` was created." + output: true + - !ruby/object:Api::Type::String + name: "updateTime" + description: "Time at which the `CustomTargetType` was updated." + output: true + - !ruby/object:Api::Type::String + name: "etag" + description: "The weak etag of the `CustomTargetType` resource. 
This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding." + output: true + - !ruby/object:Api::Type::NestedObject + name: "customActions" + description: "Configures render and deploy for the `CustomTargetType` using Skaffold custom actions." + properties: + - !ruby/object:Api::Type::String + name: "renderAction" + description: "The Skaffold custom action responsible for render operations. If not provided then Cloud Deploy will perform the render operations via `skaffold render`." + - !ruby/object:Api::Type::String + name: "deployAction" + description: "The Skaffold custom action responsible for deploy operations." + required: true + - !ruby/object:Api::Type::Array + name: "includeSkaffoldModules" + description: "List of Skaffold modules Cloud Deploy will include in the Skaffold Config as required before performing diagnose." + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: "configs" + description: "The Skaffold Config modules to use from the specified source." + item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: "git" + description: "Remote git repository containing the Skaffold Config modules." + exactly_one_of: + - git + - googleCloudStorage + properties: + - !ruby/object:Api::Type::String + name: "repo" + description: "Git repository the package should be cloned from." + required: true + - !ruby/object:Api::Type::String + name: "path" + description: "Relative path from the repository root to the Skaffold file." + - !ruby/object:Api::Type::String + name: "ref" + description: "Git ref the package should be cloned from." + - !ruby/object:Api::Type::NestedObject + name: "googleCloudStorage" + description: "Cloud Storage bucket containing Skaffold Config modules." 
+ exactly_one_of: + - git + - googleCloudStorage + properties: + - !ruby/object:Api::Type::String + name: "source" + description: "Cloud Storage source paths to copy recursively. For example, providing `gs://my-bucket/dir/configs/*` will result in Skaffold copying all files within the `dir/configs` directory in the bucket `my-bucket`." + required: true + - !ruby/object:Api::Type::String + name: "path" + description: "Relative path from the source to the Skaffold file." diff --git a/mmv1/products/compute/Instance.yaml b/mmv1/products/compute/Instance.yaml index a8f04bc8e24f..e3912dac2c84 100644 --- a/mmv1/products/compute/Instance.yaml +++ b/mmv1/products/compute/Instance.yaml @@ -186,23 +186,21 @@ properties: description: | Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the - disk can handle. Values must be between 10,000 and 120,000. - Note: Updating currently is only supported for hyperdisk skus - via disk update api/gcloud without the need to delete and recreate - the disk, hyperdisk allows for an update of IOPS every - 4 hours. To update your hyperdisk more frequently, + disk can handle. Note: Updating currently is only supported for + hyperdisk skus via disk update api/gcloud without the need to + delete and recreate the disk, hyperdisk allows for an update of + IOPS every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it. - !ruby/object:Api::Type::Integer name: 'provisionedThroughput' description: | Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that - the disk can handle. Values must be between 1 and 7,124. - Note: Updating currently is only supported for hyperdisk skus - via disk update api/gcloud without the need to delete and recreate - the disk, hyperdisk allows for an update of throughput every - 4 hours. 
To update your hyperdisk more frequently, - you'll need to manually delete and recreate it. + the disk can handle. Note: Updating currently is only supported + for hyperdisk skus via disk update api/gcloud without the need + to delete and recreate the disk, hyperdisk allows for an update + of throughput every 4 hours. To update your hyperdisk more + frequently, you'll need to manually delete and recreate it. - !ruby/object:Api::Type::Boolean name: 'enableConfidentialCompute' description: | diff --git a/mmv1/products/compute/InstanceGroup.yaml b/mmv1/products/compute/InstanceGroup.yaml index e9504d021b87..59abe5b63864 100644 --- a/mmv1/products/compute/InstanceGroup.yaml +++ b/mmv1/products/compute/InstanceGroup.yaml @@ -59,7 +59,7 @@ parameters: and will not be deleted. Only the full identifier of the instance will be returned. - **NOTE** If a user will be recreating instances under the same name + !> **WARNING** If a user will be recreating instances under the same name (eg. via `terraform taint`), please consider adding instances to an instance group via the `instance_group_membership` resource, along side the `update_triggered_by` lifecycle method with an instance's ID. diff --git a/mmv1/products/compute/InstanceGroupMembership.yaml b/mmv1/products/compute/InstanceGroupMembership.yaml index 414b0846cb83..f41938f17bb8 100644 --- a/mmv1/products/compute/InstanceGroupMembership.yaml +++ b/mmv1/products/compute/InstanceGroupMembership.yaml @@ -18,11 +18,11 @@ base_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{instance_group}} description: | Represents the Instance membership to the Instance Group. - **NOTE** You can use this resource instead of the `instances` field in the + -> **NOTE** You can use this resource instead of the `instances` field in the `google_compute_instance_group`, however it's not recommended to use it alongside this field. It might cause inconsistencies, as they can end up competing over control. 
- **NOTE** This resource has been added to avoid a situation, where after + -> **NOTE** This resource has been added to avoid a situation, where after Instance is recreated, it's removed from Instance Group and it's needed to perform `apply` twice. To avoid situations like this, please use this resource with the lifecycle `update_triggered_by` method, with the passed Instance's ID. diff --git a/mmv1/products/compute/RegionNetworkEndpoint.yaml b/mmv1/products/compute/RegionNetworkEndpoint.yaml index cf4f49b6c5d2..81d31f7e1f6e 100644 --- a/mmv1/products/compute/RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/RegionNetworkEndpoint.yaml @@ -19,7 +19,7 @@ description: | A Region network endpoint represents a IP address/FQDN and port combination that is part of a specific network endpoint group (NEG). - **NOTE**: Network endpoints cannot be created outside of a network endpoint group. + ~> **NOTE**: Network endpoints cannot be created outside of a network endpoint group. immutable: true create_verb: :POST create_url: projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{region_network_endpoint_group}}/attachNetworkEndpoints diff --git a/mmv1/products/datacatalog/Taxonomy.yaml b/mmv1/products/datacatalog/Taxonomy.yaml index 8fa5bf8c9b7b..c472fd415119 100644 --- a/mmv1/products/datacatalog/Taxonomy.yaml +++ b/mmv1/products/datacatalog/Taxonomy.yaml @@ -23,6 +23,7 @@ references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Official Documentation': https://cloud.google.com/data-catalog/docs api: https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies +# Skip sweeper as this resource name is autogenerated from the API, thus the sweeper runs on the display_name field. Handwritten sweeper added #9908. 
skip_sweeper: true iam_policy: !ruby/object:Api::Resource::IamPolicy skip_import_test: true diff --git a/mmv1/products/firebaseappcheck/AppAttestConfig.yaml b/mmv1/products/firebaseappcheck/AppAttestConfig.yaml new file mode 100644 index 000000000000..025e59fbcca9 --- /dev/null +++ b/mmv1/products/firebaseappcheck/AppAttestConfig.yaml @@ -0,0 +1,89 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +!ruby/object:Api::Resource +name: "AppAttestConfig" +base_url: projects/{{project}}/apps/{{app_id}}/appAttestConfig +self_link: projects/{{project}}/apps/{{app_id}}/appAttestConfig +create_url: projects/{{project}}/apps/{{app_id}}/appAttestConfig?updateMask=tokenTtl +create_verb: :PATCH +update_verb: :PATCH +update_mask: true +skip_delete: true +description: | + An app's App Attest configuration object. Note that the Team ID registered with your + app is used as part of the validation process. Make sure your `google_firebase_apple_app` has a team_id present. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + "Official Documentation": "https://firebase.google.com/docs/app-check" + api: "https://firebase.google.com/docs/reference/appcheck/rest/v1/projects.apps.appAttestConfig" +import_format: + [ + "projects/{{project}}/apps/{{app_id}}/appAttestConfig", + "{{project}}/{{app_id}}", + "{{app_id}}", + ] +examples: + - !ruby/object:Provider::Terraform::Examples + name: "firebase_app_check_app_attest_config_minimal" + # Need the time_sleep resource + pull_external: true + primary_resource_id: "default" + vars: + team_id: "9987654321" + bundle_id: "bundle.id.appattest" + test_vars_overrides: + # Don't add random suffix + team_id: '"9987654321"' + test_env_vars: + project_id: :PROJECT_NAME + - !ruby/object:Provider::Terraform::Examples + name: "firebase_app_check_app_attest_config_full" + # Need the time_sleep resource + pull_external: true + primary_resource_id: "default" + vars: + team_id: "9987654321" + bundle_id: "bundle.id.appattest" + token_ttl: "7200s" + test_vars_overrides: + # Don't add random suffix + team_id: '"9987654321"' + token_ttl: '"7200s"' + test_env_vars: + project_id: :PROJECT_NAME +parameters: + - !ruby/object:Api::Type::String + name: app_id + description: | + The ID of an + [Apple App](https://firebase.google.com/docs/reference/firebase-management/rest/v1beta1/projects.iosApps#IosApp.FIELDS.app_id). + required: true + immutable: true + url_param_only: true +properties: + - !ruby/object:Api::Type::String + name: name + description: | + The relative resource name of the App Attest configuration object + output: true + pattern: projects/{{project}}/apps/{{app_id}}/appAttestConfig + - !ruby/object:Api::Type::String + name: tokenTtl + description: | + Specifies the duration for which App Check tokens exchanged from App Attest artifacts will be valid. + If unset, a default value of 1 hour is assumed. Must be between 30 minutes and 7 days, inclusive. 
+ + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + default_from_api: true diff --git a/mmv1/products/firebaseappcheck/PlayIntegrityConfig.yaml b/mmv1/products/firebaseappcheck/PlayIntegrityConfig.yaml new file mode 100644 index 000000000000..7631e64d1a4b --- /dev/null +++ b/mmv1/products/firebaseappcheck/PlayIntegrityConfig.yaml @@ -0,0 +1,83 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +!ruby/object:Api::Resource +name: "PlayIntegrityConfig" +base_url: projects/{{project}}/apps/{{app_id}}/playIntegrityConfig +self_link: projects/{{project}}/apps/{{app_id}}/playIntegrityConfig +create_url: projects/{{project}}/apps/{{app_id}}/playIntegrityConfig?updateMask=tokenTtl +create_verb: :PATCH +update_verb: :PATCH +update_mask: true +skip_delete: true +description: | + An app's Play Integrity configuration object. Note that your registered SHA-256 certificate fingerprints are used to validate tokens issued by the Play Integrity API. + Make sure your `google_firebase_android_app` has at least one `sha256_hashes` present. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + "Official Documentation": "https://firebase.google.com/docs/app-check" + api: "https://firebase.google.com/docs/reference/appcheck/rest/v1/projects.apps.playIntegrityConfig" +import_format: + [ + "projects/{{project}}/apps/{{app_id}}/playIntegrityConfig", + "{{project}}/{{app_id}}", + "{{app_id}}", + ] +examples: + - !ruby/object:Provider::Terraform::Examples + name: "firebase_app_check_play_integrity_config_minimal" + # Need the time_sleep resource + pull_external: true + primary_resource_id: "default" + vars: + package_name: "package.name.playintegrity" + test_env_vars: + project_id: :PROJECT_NAME + - !ruby/object:Provider::Terraform::Examples + name: "firebase_app_check_play_integrity_config_full" + # Need the time_sleep resource + pull_external: true + primary_resource_id: "default" + vars: + package_name: "package.name.playintegrity" + token_ttl: "7200s" + test_vars_overrides: + # Don't add random suffix + token_ttl: '"7200s"' + test_env_vars: + project_id: :PROJECT_NAME +parameters: + - !ruby/object:Api::Type::String + name: app_id + description: | + The ID of an + [Android App](https://firebase.google.com/docs/reference/firebase-management/rest/v1beta1/projects.androidApps#AndroidApp.FIELDS.app_id). + required: true + immutable: true + url_param_only: true +properties: + - !ruby/object:Api::Type::String + name: name + description: | + The relative resource name of the Play Integrity configuration object + output: true + pattern: projects/{{project}}/apps/{{app_id}}/playIntegrityConfig + - !ruby/object:Api::Type::String + name: tokenTtl + description: | + Specifies the duration for which App Check tokens exchanged from Play Integrity artifacts will be valid. + If unset, a default value of 1 hour is assumed. Must be between 30 minutes and 7 days, inclusive. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
+ default_from_api: true diff --git a/mmv1/products/healthcare/Hl7V2Store.yaml b/mmv1/products/healthcare/Hl7V2Store.yaml index 2c395e9a1338..b923d404a489 100644 --- a/mmv1/products/healthcare/Hl7V2Store.yaml +++ b/mmv1/products/healthcare/Hl7V2Store.yaml @@ -73,6 +73,12 @@ properties: ** Changing this property may recreate the Hl7v2 store (removing all data) ** required: true immutable: true + - !ruby/object:Api::Type::Boolean + name: rejectDuplicateMessage + required: false + default_value: false + description: | + Determines whether to reject duplicate messages. - !ruby/object:Api::Type::NestedObject name: parserConfig required: false diff --git a/mmv1/products/integrationconnectors/Connection.yaml b/mmv1/products/integrationconnectors/Connection.yaml index 51baae956ad3..cde9925b86dd 100644 --- a/mmv1/products/integrationconnectors/Connection.yaml +++ b/mmv1/products/integrationconnectors/Connection.yaml @@ -927,7 +927,7 @@ properties: - !ruby/object:Api::Type::NestedObject name: "connectorVersionInfraConfig" description: | - This cofiguration provides infra configs like rate limit threshold which need to be configurable for every connector version. + This configuration provides infra configs like rate limit threshold which need to be configurable for every connector version. 
output: true properties: - !ruby/object:Api::Type::String diff --git a/mmv1/products/looker/Instance.yaml b/mmv1/products/looker/Instance.yaml index 38b871deb18e..be77445ddd7a 100644 --- a/mmv1/products/looker/Instance.yaml +++ b/mmv1/products/looker/Instance.yaml @@ -73,6 +73,14 @@ examples: kms_key_name: 'acctest.BootstrapKMSKeyInLocation(t, "us-central1").CryptoKey.Name' network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "looker-vpc-network-1", acctest.ServiceNetworkWithPrefixLength(20))' skip_docs: true + - !ruby/object:Provider::Terraform::Examples + name: 'looker_instance_custom_domain' + primary_resource_id: 'looker-instance' + vars: + instance_name: 'my-instance' + client_id: 'my-client-id' + client_secret: 'my-client-secret' + custom_domain: 'my-custom-domain' parameters: - !ruby/object:Api::Type::String name: 'region' @@ -416,3 +424,19 @@ properties: description: | Number of additional Developer Users to allocate to the Looker Instance. # UserMetadata Object - End + # CustomDomain Object + - !ruby/object:Api::Type::NestedObject + name: customDomain + description: | + Custom domain settings for a Looker instance. + properties: + - !ruby/object:Api::Type::String + name: 'domain' + description: | + Domain name + - !ruby/object:Api::Type::String + name: 'state' + description: | + Status of the custom domain. + output: true + # CustomDomain Object - End diff --git a/mmv1/products/migrationcenter/PreferenceSet.yaml b/mmv1/products/migrationcenter/PreferenceSet.yaml new file mode 100644 index 000000000000..9232294c71e5 --- /dev/null +++ b/mmv1/products/migrationcenter/PreferenceSet.yaml @@ -0,0 +1,206 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +base_url: projects/{{project}}/locations/{{location}}/preferenceSets +create_url: projects/{{project}}/locations/{{location}}/preferenceSets?preferenceSetId={{preference_set_id}} +self_link: projects/{{project}}/locations/{{location}}/preferenceSets/{{preference_set_id}} +id_format: projects/{{project}}/locations/{{location}}/preferenceSets/{{preference_set_id}} +import_format: + - projects/{{project}}/locations/{{location}}/preferenceSets/{{preference_set_id}} +name: PreferenceSet +description: Manages the PreferenceSet resource. +update_verb: :PATCH +update_mask: true +autogen_async: true +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Managing Migration Preferences': 'https://cloud.google.com/migration-center/docs/migration-preferences' + api: 'https://cloud.google.com/migration-center/docs/reference/rest/v1' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'preference_set_basic' + primary_resource_id: 'default' + vars: + set_name: 'preference-set-test' + - !ruby/object:Provider::Terraform::Examples + name: 'preference_set_full' + primary_resource_id: 'default' + vars: + set_name: 'preference-set-test' +properties: + - !ruby/object:Api::Type::String + name: name + description: 'Output only. Name of the preference set. ' + output: true + - !ruby/object:Api::Type::String + name: createTime + description: 'Output only. The timestamp when the preference set was created. ' + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: 'Output only. 
The timestamp when the preference set was last updated. ' + output: true + - !ruby/object:Api::Type::String + name: displayName + description: 'User-friendly display name. Maximum length is 63 characters. ' + - !ruby/object:Api::Type::String + name: description + description: 'A description of the preference set. ' + - !ruby/object:Api::Type::NestedObject + name: virtualMachinePreferences + description: 'VirtualMachinePreferences enables you to create sets of assumptions, + for example, a geographical location and pricing track, for your migrated virtual + machines. The set of preferences influence recommendations for migrating virtual + machine assets. ' + properties: + - !ruby/object:Api::Type::String + name: targetProduct + description: "Target product for assets using this preference set. Specify either + target product or business goal, but not both. \n Possible values:\n COMPUTE_MIGRATION_TARGET_PRODUCT_UNSPECIFIED\nCOMPUTE_MIGRATION_TARGET_PRODUCT_COMPUTE_ENGINE\nCOMPUTE_MIGRATION_TARGET_PRODUCT_VMWARE_ENGINE\nCOMPUTE_MIGRATION_TARGET_PRODUCT_SOLE_TENANCY" + - !ruby/object:Api::Type::NestedObject + name: regionPreferences + description: 'The user preferences relating to target regions. ' + properties: + - !ruby/object:Api::Type::Array + name: preferredRegions + item_type: Api::Type::String + description: 'A list of preferred regions, ordered by the most preferred region + first. Set only valid Google Cloud region names. See https://cloud.google.com/compute/docs/regions-zones + for available regions. ' + - !ruby/object:Api::Type::String + name: commitmentPlan + description: "Commitment plan to consider when calculating costs for virtual machine + insights and recommendations. If you are unsure which value to set, a 3 year + commitment plan is often a good value to start with. 
\n Possible values:\n COMMITMENT_PLAN_UNSPECIFIED\nCOMMITMENT_PLAN_NONE\nCOMMITMENT_PLAN_ONE_YEAR\nCOMMITMENT_PLAN_THREE_YEARS" + - !ruby/object:Api::Type::String + name: sizingOptimizationStrategy + description: "Sizing optimization strategy specifies the preferred strategy used + when extrapolating usage data to calculate insights and recommendations for + a virtual machine. If you are unsure which value to set, a moderate sizing optimization + strategy is often a good value to start with. \n Possible values:\n SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED\nSIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE\nSIZING_OPTIMIZATION_STRATEGY_MODERATE\nSIZING_OPTIMIZATION_STRATEGY_AGGRESSIVE" + - !ruby/object:Api::Type::NestedObject + name: computeEnginePreferences + description: 'The user preferences relating to Compute Engine target platform. ' + properties: + - !ruby/object:Api::Type::NestedObject + name: machinePreferences + description: 'The type of machines to consider when calculating virtual machine + migration insights and recommendations. Not all machine types are available + in all zones and regions. ' + properties: + - !ruby/object:Api::Type::Array + name: allowedMachineSeries + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: code + description: 'Code to identify a Compute Engine machine series. Consult + https://cloud.google.com/compute/docs/machine-resource#machine_type_comparison + for more details on the available series. ' + description: 'Compute Engine machine series to consider for insights and recommendations. + If empty, no restriction is applied on the machine series. ' + - !ruby/object:Api::Type::String + name: licenseType + description: "License type to consider when calculating costs for virtual machine + insights and recommendations. If unspecified, costs are calculated based on + the default licensing plan. 
\n Possible values:\n LICENSE_TYPE_UNSPECIFIED\nLICENSE_TYPE_DEFAULT\nLICENSE_TYPE_BRING_YOUR_OWN_LICENSE" + - !ruby/object:Api::Type::NestedObject + name: vmwareEnginePreferences + description: 'The user preferences relating to Google Cloud VMware Engine target + platform. ' + properties: + - !ruby/object:Api::Type::Double + name: cpuOvercommitRatio + description: 'CPU overcommit ratio. Acceptable values are between 1.0 and 8.0, + with 0.1 increment. ' + - !ruby/object:Api::Type::Double + name: memoryOvercommitRatio + description: 'Memory overcommit ratio. Acceptable values are 1.0, 1.25, 1.5, + 1.75 and 2.0. ' + - !ruby/object:Api::Type::Double + name: storageDeduplicationCompressionRatio + description: 'The Deduplication and Compression ratio is based on the logical + (Used Before) space required to store data before applying deduplication and + compression, in relation to the physical (Used After) space required after + applying deduplication and compression. Specifically, the ratio is the Used + Before space divided by the Used After space. For example, if the Used Before + space is 3 GB, but the physical Used After space is 1 GB, the deduplication + and compression ratio is 3x. Acceptable values are between 1.0 and 4.0. ' + - !ruby/object:Api::Type::String + name: commitmentPlan + description: "Commitment plan to consider when calculating costs for virtual + machine insights and recommendations. If you are unsure which value to set, + a 3 year commitment plan is often a good value to start with. \n Possible + values:\n COMMITMENT_PLAN_UNSPECIFIED\nON_DEMAND\nCOMMITMENT_1_YEAR_MONTHLY_PAYMENTS\nCOMMITMENT_3_YEAR_MONTHLY_PAYMENTS\nCOMMITMENT_1_YEAR_UPFRONT_PAYMENT\nCOMMITMENT_3_YEAR_UPFRONT_PAYMENT" + - !ruby/object:Api::Type::NestedObject + name: soleTenancyPreferences + description: 'Preferences concerning Sole Tenancy nodes and VMs. ' + properties: + - !ruby/object:Api::Type::Double + name: cpuOvercommitRatio + description: 'CPU overcommit ratio. 
Acceptable values are between 1.0 and 2.0 + inclusive. ' + - !ruby/object:Api::Type::String + name: hostMaintenancePolicy + description: "Sole Tenancy nodes maintenance policy. \n Possible values:\n HOST_MAINTENANCE_POLICY_UNSPECIFIED\nHOST_MAINTENANCE_POLICY_DEFAULT\nHOST_MAINTENANCE_POLICY_RESTART_IN_PLACE\nHOST_MAINTENANCE_POLICY_MIGRATE_WITHIN_NODE_GROUP" + - !ruby/object:Api::Type::String + name: commitmentPlan + description: "Commitment plan to consider when calculating costs for virtual + machine insights and recommendations. If you are unsure which value to set, + a 3 year commitment plan is often a good value to start with. \n Possible + values:\n COMMITMENT_PLAN_UNSPECIFIED\nON_DEMAND\nCOMMITMENT_1_YEAR\nCOMMITMENT_3_YEAR" + - !ruby/object:Api::Type::Array + name: nodeTypes + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: nodeName + description: 'Name of the Sole Tenant node. Consult https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes ' + description: 'A list of sole tenant node types. An empty list means that all + possible node types will be considered. ' +parameters: + - !ruby/object:Api::Type::String + name: location + description: 'Part of `parent`. See documentation of `projectsId`. ' + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: preferenceSetId + description: 'Required. User specified ID for the preference set. It will become + the last component of the preference set name. The ID must be unique within the + project, must conform with RFC-1034, is restricted to lower-cased letters, and + has a maximum length of 63 characters. The ID must match the regular expression + `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. 
' + url_param_only: true + required: true + immutable: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message diff --git a/mmv1/products/netapp/volume.yaml b/mmv1/products/netapp/volume.yaml index 4f617b3ab27c..698f8ab4906b 100644 --- a/mmv1/products/netapp/volume.yaml +++ b/mmv1/products/netapp/volume.yaml @@ -53,7 +53,7 @@ parameters: url_param_only: true examples: - !ruby/object:Provider::Terraform::Examples - name: 'volume_basic' + name: 'netapp_volume_basic' primary_resource_id: 'test_volume' vars: volume_name: 'test-volume' @@ -64,6 +64,30 @@ examples: test_vars_overrides: network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog"))' properties: + - !ruby/object:Api::Type::Enum + name: 'state' + description: | + State of the volume. + values: + - STATE_UNSPECIFIED + - READY + - CREATING + - DELETING + - UPDATING + - RESTORING + - DISABLED + - ERROR + output: true + - !ruby/object:Api::Type::String + name: 'stateDetails' + description: | + State details of the volume. + output: true + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Create time of the volume. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z". + output: true - !ruby/object:Api::Type::String name: 'shareName' description: | @@ -205,11 +229,11 @@ properties: name: 'description' description: | An optional description of this resource. - # Use of snapReserve is depricated. We don't expose it intentially. + # Use of snapReserve is deprecated. 
Here as a comment to express intention. # - !ruby/object:Api::Type::Integer # name: 'snapReserve' # description: | - # Snap_reserve specifies percentage of volume storage reserved for snapshot storage. Default is 0 percent. Use is deprecated. + # `snap_reserve` specifies percentage of volume storage reserved for snapshot storage. Default is 0 percent. Use is deprecated. - !ruby/object:Api::Type::Boolean name: 'snapshotDirectory' description: | @@ -245,6 +269,34 @@ properties: description: | Reports the resource name of the Active Directory policy being used. Inherited from storage pool. output: true + - !ruby/object:Api::Type::NestedObject + name: 'restoreParameters' + description: |- + Used to create this volume from a snapshot (= cloning) or an backup. + immutable: true + # This parameter is only used at CREATE. READs will omit it. + ignore_read: true + properties: + - !ruby/object:Api::Type::String + name: 'sourceSnapshot' + description: |- + Full name of the snapshot to use for creating this volume. + `source_snapshot` and `source_backup` cannot be used simultaneously. + Format: `projects/{{project}}/locations/{{location}}/volumes/{{volume}}/snapshots/{{snapshot}}`. + exactly_one_of: + - restore_parameters.0.source_backup + - restore_parameters.0.source_snapshot + immutable: true + - !ruby/object:Api::Type::String + name: 'sourceBackup' + description: |- + Full name of the snapshot to use for creating this volume. + `source_snapshot` and `source_backup` cannot be used simultaneously. + Format: `projects/{{project}}/locations/{{location}}/backupVaults/{{backupVaultId}}/backups/{{backup}}`. + exactly_one_of: + - restore_parameters.0.source_backup + - restore_parameters.0.source_snapshot + immutable: true - !ruby/object:Api::Type::String name: 'kmsConfig' description: | @@ -405,17 +457,6 @@ properties: description: |- Set the day or days of the month to make a snapshot (1-31). Accepts a comma separated number of days. Defaults to '1'. 
default_value: '1' -# This is disabled until we have support for backup resource and can test it. -# - !ruby/object:Api::Type::NestedObject -# name: restoreParameters -# description: Specifies the source information to create a volume from. -# immutable: true -# properties: -# - !ruby/object:Api::Type::String -# name: 'sourceSnapshot' -# description: |- -# Full name of the snapshot resource. Format: `projects/{{project}}/locations/{{location}}/volumes/{{volume}}/snapshots/{{snapshot}}`. -# required: true virtual_fields: - !ruby/object:Api::Type::Enum name: 'deletion_policy' diff --git a/mmv1/products/networksecurity/SecurityProfileGroup.yaml b/mmv1/products/networksecurity/SecurityProfileGroup.yaml new file mode 100644 index 000000000000..4878950fe263 --- /dev/null +++ b/mmv1/products/networksecurity/SecurityProfileGroup.yaml @@ -0,0 +1,95 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'SecurityProfileGroup' +base_url: '{{parent}}/locations/{{location}}securityProfileGroups' +create_url: '{{parent}}/locations/{{location}}/securityProfileGroups?securityProfileGroupId={{name}}' +self_link: '{{parent}}/locations/{{location}}/securityProfileGroups/{{name}}' +min_version: beta +update_verb: :PATCH +update_mask: true +description: | + A security profile group defines a container for security profiles. 
+references: + !ruby/object:Api::Resource::ReferenceLinks + api: 'https://cloud.google.com/firewall/docs/reference/network-security/rest/v1beta1/organizations.locations.securityProfileGroups' + guides: + 'Security profile groups overview': 'https://cloud.google.com/firewall/docs/about-security-profile-groups' + 'Create and manage security profile groups': 'https://cloud.google.com/firewall/docs/configure-security-profile-groups' +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' + include_project: true +import_format: ['{{%parent}}/locations/{{location}}/securityProfileGroups/{{name}}'] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'network_security_security_profile_group_basic' + primary_resource_id: 'default' + min_version: beta + test_env_vars: + org_id: :ORG_ID + vars: + security_profile_group_name: 'sec-profile-group' + security_profile_name: 'sec-profile' +parameters: + - !ruby/object:Api::Type::String + name: 'name' + required: true + immutable: true + url_param_only: true + description: | + The name of the security profile group resource. + - !ruby/object:Api::Type::String + name: 'location' + default_value: 'global' + immutable: true + description: | + The location of the security profile group. + The default value is `global`. + url_param_only: true + - !ruby/object:Api::Type::String + name: 'parent' + description: | + The name of the parent this security profile group belongs to. + Format: organizations/{organization_id}. + immutable: true + url_param_only: true +properties: + - !ruby/object:Api::Type::Time + name: 'createTime' + description: Time the security profile group was created in UTC. + output: true + - !ruby/object:Api::Type::Time + name: 'updateTime' + description: Time the security profile group was updated in UTC. 
+ output: true + - !ruby/object:Api::Type::String + name: 'etag' + output: true + description: | + This checksum is computed by the server based on the value of other fields, + and may be sent on update and delete requests to ensure the client has an up-to-date + value before proceeding. + - !ruby/object:Api::Type::String + name: 'description' + description: | + An optional description of the profile. The Max length is 512 characters. + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: | + A map of key/value label pairs to assign to the resource. + - !ruby/object:Api::Type::String + name: 'threatPreventionProfile' + description: | + Reference to a SecurityProfile with the threat prevention configuration for the SecurityProfileGroup. diff --git a/mmv1/products/notebooks/Instance.yaml b/mmv1/products/notebooks/Instance.yaml index b770e0489040..1e0ad5c49374 100644 --- a/mmv1/products/notebooks/Instance.yaml +++ b/mmv1/products/notebooks/Instance.yaml @@ -55,6 +55,17 @@ examples: region_override: 'us-west1-a' vars: instance_name: 'notebooks-instance' + - !ruby/object:Provider::Terraform::Examples + name: 'notebook_instance_basic_stopped' + primary_resource_id: 'instance' + primary_resource_name: "fmt.Sprintf(\"tf-test-notebooks-instance%s\", + context[\"\ + random_suffix\"])" + region_override: 'us-west1-a' + vars: + instance_name: 'notebooks-instance' + ignore_read_extra: + - 'desired_state' - !ruby/object:Provider::Terraform::Examples name: 'notebook_instance_basic_container' primary_resource_id: 'instance' @@ -87,9 +98,20 @@ examples: key_name: 'acctest.BootstrapKMSKeyInLocation(t, "global").CryptoKey.Name' test_env_vars: service_account: :SERVICE_ACCT +virtual_fields: + - !ruby/object:Api::Type::Enum + name: desired_state + description: | + Desired state of the Notebook Instance. Set this field to `ACTIVE` to start the Instance, and `STOPPED` to stop the Instance. 
+ values: + - :ACTIVE + - :STOPPED + default_value: :ACTIVE custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/notebooks_instance.go update_encoder: templates/terraform/update_encoder/notebooks_instance.go + post_create: templates/terraform/post_create/notebooks_instance.go.erb + post_update: templates/terraform/post_update/notebooks_instance.go.erb state_upgraders: true schema_version: 1 parameters: diff --git a/mmv1/products/securityposture/posture.yaml b/mmv1/products/securityposture/posture.yaml index 993ada901fd7..cc1882c4d055 100644 --- a/mmv1/products/securityposture/posture.yaml +++ b/mmv1/products/securityposture/posture.yaml @@ -120,6 +120,7 @@ properties: output: true - !ruby/object:Api::Type::Array name: 'policySets' + required: true description: | List of policy sets for the posture. item_type: !ruby/object:Api::Type::NestedObject @@ -141,6 +142,7 @@ properties: name: 'policies' description: | List of security policy + required: true item_type: !ruby/object:Api::Type::NestedObject name: 'Policy' description: | @@ -231,7 +233,7 @@ properties: If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. This field can be set only in policies for boolean constraints. - !ruby/object:Api::Type::NestedObject - name: 'expr' + name: 'condition' description: | Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. This page details the objects and attributes that are used to the build the CEL expressions for @@ -348,7 +350,7 @@ properties: If `true`, then the policy is enforced. If `false`, then any configuration is acceptable. This field can be set only in policies for boolean constraints. - !ruby/object:Api::Type::NestedObject - name: 'expr' + name: 'condition' description: | Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. 
This page details the objects and attributes that are used to the build the CEL expressions for diff --git a/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml b/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml index de319a0b83f4..ef350a823964 100644 --- a/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml +++ b/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml @@ -55,6 +55,11 @@ examples: primary_resource_id: 'featureview' vars: name: 'example_feature_view' + - !ruby/object:Provider::Terraform::Examples + name: 'vertex_ai_featureonlinestore_featureview_feature_registry' + primary_resource_id: 'featureview_featureregistry' + vars: + name: 'example_feature_view_feature_registry' - !ruby/object:Provider::Terraform::Examples name: 'vertex_ai_featureonlinestore_featureview_with_vector_search' primary_resource_id: 'featureview_vector_search' @@ -112,6 +117,9 @@ properties: name: 'bigQuerySource' description: | Configures how data is supposed to be extracted from a BigQuery source to be loaded onto the FeatureOnlineStore. + exactly_one_of: + - big_query_source + - feature_registry_source properties: - !ruby/object:Api::Type::String name: 'uri' @@ -124,10 +132,40 @@ properties: description: | Columns to construct entityId / row keys. Start by supporting 1 only. item_type: Api::Type::String + - !ruby/object:Api::Type::NestedObject + name: 'featureRegistrySource' + conflicts: + - vector_search_config + exactly_one_of: + - big_query_source + - feature_registry_source + description: | + Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore. + properties: + - !ruby/object:Api::Type::Array + name: 'featureGroups' + required: true + description: | + List of features that need to be synced to Online Store. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'featureGroupId' + required: true + description: | + Identifier of the feature group. 
+ - !ruby/object:Api::Type::Array + name: featureIds + required: true + description: | + Identifiers of features under the feature group. + item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject name: 'vectorSearchConfig' description: | Configuration for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + conflicts: + - feature_registry_source immutable: true min_version: beta properties: diff --git a/mmv1/products/workbench/Instance.yaml b/mmv1/products/workbench/Instance.yaml index b0b2b08e8d43..1c0faeab3958 100644 --- a/mmv1/products/workbench/Instance.yaml +++ b/mmv1/products/workbench/Instance.yaml @@ -55,7 +55,7 @@ examples: ignore_read_extra: - 'gce_setup.0.vm_image' - !ruby/object:Provider::Terraform::Examples - name: 'workbench_instance_labels' + name: 'workbench_instance_labels_stopped' primary_resource_id: 'instance' primary_resource_name: "fmt.Sprintf(\"tf-test-workbench-instance%s\", context[\"\ @@ -66,6 +66,8 @@ examples: network_name: 'wbi-test-default' test_env_vars: service_account: :SERVICE_ACCT + ignore_read_extra: + - 'desired_state' - !ruby/object:Provider::Terraform::Examples name: 'workbench_instance_full' primary_resource_id: 'instance' @@ -83,15 +85,20 @@ examples: service_account: :SERVICE_ACCT ignore_read_extra: - 'gce_setup.0.vm_image' - - 'gce_setup.0.boot_disk.0.disk_encryption' - 'gce_setup.0.boot_disk.0.disk_type' - - 'gce_setup.0.boot_disk.0.kms_key' - - 'gce_setup.0.data_disks.0.disk_encryption' - 'gce_setup.0.data_disks.0.disk_type' - - 'gce_setup.0.data_disks.0.kms_key' timeouts: !ruby/object:Api::Timeouts insert_minutes: 10 update_minutes: 20 +virtual_fields: + - !ruby/object:Api::Type::Enum + name: desired_state + description: | + Desired state of the Workbench Instance. Set this field to `ACTIVE` to start the Instance, and `STOPPED` to stop the Instance. 
+ values: + - :ACTIVE + - :STOPPED + default_value: :ACTIVE custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/workbench_instance.go post_create: templates/terraform/post_create/workbench_instance.go.erb @@ -149,6 +156,36 @@ properties: use accelerators, make sure that your configuration has [enough vCPUs and memory to support the `machine_type` you have selected](https://cloud.google.com/compute/docs/gpus/#gpus-list). Currently supports only one accelerator configuration. + - !ruby/object:Api::Type::NestedObject + name: shieldedInstanceConfig + default_from_api: true + allow_empty_object: true + send_empty_value: true + description: | + A set of Shielded Instance options. See [Images using supported Shielded + VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). + Not all combinations are valid. + properties: + - !ruby/object:Api::Type::Boolean + name: enableSecureBoot + description: | + Optional. Defines whether the VM instance has Secure Boot enabled. + Secure Boot helps ensure that the system only runs authentic software by verifying + the digital signature of all boot components, and halting the boot process + if signature verification fails. Disabled by default. + - !ruby/object:Api::Type::Boolean + name: enableVtpm + description: | + Optional. Defines whether the VM instance has the vTPM enabled. + Enabled by default. + - !ruby/object:Api::Type::Boolean + name: enableIntegrityMonitoring + description: | + Optional. Defines whether the VM instance has integrity monitoring + enabled. Enables monitoring and attestation of the boot integrity of the VM + instance. The attestation is performed against the integrity policy baseline. + This baseline is initially derived from the implicitly trusted boot image + when the VM instance is created. Enabled by default. 
- !ruby/object:Api::Type::Array name: serviceAccounts description: | @@ -231,7 +268,6 @@ properties: - !ruby/object:Api::Type::Enum name: diskEncryption default_from_api: true - custom_flatten: templates/terraform/custom_flatten/workbench_instance_boot_disk_encryption_flatten.go.erb values: - GMEK - CMEK @@ -242,11 +278,11 @@ properties: - !ruby/object:Api::Type::String name: kmsKey description: | - 'Optional. Input only. The KMS key used to encrypt the disks, only + 'Optional. The KMS key used to encrypt the disks, only applicable if disk_encryption is CMEK. Format: `projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}` Learn more about using your own encryption keys.' immutable: true - custom_flatten: templates/terraform/custom_flatten/workbench_instance_boot_disk_kms_flatten.go.erb + diff_suppress_func: WorkbenchInstanceKmsDiffSuppress - !ruby/object:Api::Type::Array name: dataDisks description: Data disks attached to the VM instance. Currently supports only one data disk. @@ -276,7 +312,6 @@ properties: - !ruby/object:Api::Type::Enum name: diskEncryption default_from_api: true - custom_flatten: templates/terraform/custom_flatten/workbench_instance_data_disk_encryption_flatten.go.erb values: - GMEK - CMEK @@ -287,11 +322,11 @@ properties: - !ruby/object:Api::Type::String name: kmsKey description: | - 'Optional. Input only. The KMS key used to encrypt the disks, + 'Optional. The KMS key used to encrypt the disks, only applicable if disk_encryption is CMEK. Format: `projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}` Learn more about using your own encryption keys.' immutable: true - custom_flatten: templates/terraform/custom_flatten/workbench_instance_data_disk_kms_flatten.go.erb + diff_suppress_func: WorkbenchInstanceKmsDiffSuppress description: | Optional. Data disks attached to the VM instance. Currently supports only one data disk. 
diff --git a/mmv1/templates/terraform/constants/notebooks_instance.go b/mmv1/templates/terraform/constants/notebooks_instance.go index 5d376547fe83..230d2aece282 100644 --- a/mmv1/templates/terraform/constants/notebooks_instance.go +++ b/mmv1/templates/terraform/constants/notebooks_instance.go @@ -33,3 +33,56 @@ func NotebooksInstanceKmsDiffSuppress(_, old, new string, _ *schema.ResourceData } return false } + +<% unless compiler == "terraformgoogleconversion-codegen" -%> +// waitForNotebooksInstanceActive waits for an Notebook instance to become "ACTIVE" +func waitForNotebooksInstanceActive(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { + return resource.Retry(timeout, func() *resource.RetryError { + if err := resourceNotebooksInstanceRead(d, config); err != nil { + return resource.NonRetryableError(err) + } + + name := d.Get("name").(string) + state := d.Get("state").(string) + if state == "ACTIVE" { + log.Printf("[DEBUG] Notebook Instance %q has state %q.", name, state) + return nil + } else { + return resource.RetryableError(fmt.Errorf("Notebook Instance %q has state %q. 
Waiting for ACTIVE state", name, state)) + } + + }) +} +<% end -%> + +func modifyNotebooksInstanceState(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, state string) (map[string]interface{}, error) { + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:"+state) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, fmt.Errorf("Unable to %q google_notebooks_instance %q: %s", state, d.Id(), err) + } + return res, nil +} + +<% unless compiler == "terraformgoogleconversion-codegen" -%> +func waitForNotebooksOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { + var opRes map[string]interface{} + err := NotebooksOperationWaitTimeWithResponse( + config, response, &opRes, project, "Modifying Notebook Instance state", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + return nil +} +<% end -%> diff --git a/mmv1/templates/terraform/constants/workbench_instance.go b/mmv1/templates/terraform/constants/workbench_instance.go index 6ae6d20dca67..4462c0e5baa7 100644 --- a/mmv1/templates/terraform/constants/workbench_instance.go +++ b/mmv1/templates/terraform/constants/workbench_instance.go @@ -140,3 +140,42 @@ func waitForWorkbenchInstanceActive(d *schema.ResourceData, config *transport_tp }) } <% end -%> + +func modifyWorkbenchInstanceState(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, state string) (map[string]interface{}, error) { + url, err := tpgresource.ReplaceVars(d, config, 
"{{WorkbenchBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:"+state) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, fmt.Errorf("Unable to %q google_workbench_instance %q: %s", state, d.Id(), err) + } + return res, nil +} + +func WorkbenchInstanceKmsDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if strings.HasPrefix(old, new) { + return true + } + return false +} + +<% unless compiler == "terraformgoogleconversion-codegen" -%> +func waitForWorkbenchOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { + var opRes map[string]interface{} + err := WorkbenchOperationWaitTimeWithResponse( + config, response, &opRes, project, "Modifying Workbench Instance state", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + return nil +} +<% end -%> diff --git a/mmv1/templates/terraform/custom_flatten/workbench_instance_boot_disk_encryption_flatten.go.erb b/mmv1/templates/terraform/custom_flatten/workbench_instance_boot_disk_encryption_flatten.go.erb deleted file mode 100644 index eb44d9657f33..000000000000 --- a/mmv1/templates/terraform/custom_flatten/workbench_instance_boot_disk_encryption_flatten.go.erb +++ /dev/null @@ -1,17 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2023 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return d.Get("gce_setup.0.boot_disk.0.disk_encryption") -} diff --git a/mmv1/templates/terraform/custom_flatten/workbench_instance_boot_disk_kms_flatten.go.erb b/mmv1/templates/terraform/custom_flatten/workbench_instance_boot_disk_kms_flatten.go.erb deleted file mode 100644 index 70c27a7661e2..000000000000 --- a/mmv1/templates/terraform/custom_flatten/workbench_instance_boot_disk_kms_flatten.go.erb +++ /dev/null @@ -1,17 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2023 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. 
--%> -func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return d.Get("gce_setup.0.boot_disk.0.kms_key") -} diff --git a/mmv1/templates/terraform/custom_flatten/workbench_instance_data_disk_encryption_flatten.go.erb b/mmv1/templates/terraform/custom_flatten/workbench_instance_data_disk_encryption_flatten.go.erb deleted file mode 100644 index d93558a3db76..000000000000 --- a/mmv1/templates/terraform/custom_flatten/workbench_instance_data_disk_encryption_flatten.go.erb +++ /dev/null @@ -1,17 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2023 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return d.Get("gce_setup.0.data_disks.0.disk_encryption") -} diff --git a/mmv1/templates/terraform/custom_flatten/workbench_instance_data_disk_kms_flatten.go.erb b/mmv1/templates/terraform/custom_flatten/workbench_instance_data_disk_kms_flatten.go.erb deleted file mode 100644 index b6a7b9649db0..000000000000 --- a/mmv1/templates/terraform/custom_flatten/workbench_instance_data_disk_kms_flatten.go.erb +++ /dev/null @@ -1,17 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2023 Google Inc. 
- # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return d.Get("gce_setup.0.data_disks.0.kms_key") -} diff --git a/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_basic.tf.erb b/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_basic.tf.erb new file mode 100644 index 000000000000..77e7541f05fb --- /dev/null +++ b/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_basic.tf.erb @@ -0,0 +1,17 @@ +resource "google_clouddeploy_custom_target_type" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + name = "<%= ctx[:vars]['custom_target_type_name'] %>" + description = "My custom target type" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_gcs_skaffold_modules.tf.erb b/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_gcs_skaffold_modules.tf.erb new file mode 100644 index 000000000000..3dc84a768fdf --- /dev/null +++ 
b/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_gcs_skaffold_modules.tf.erb @@ -0,0 +1,16 @@ +resource "google_clouddeploy_custom_target_type" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + name = "<%= ctx[:vars]['custom_target_type_name'] %>" + description = "My custom target type" + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + include_skaffold_modules { + configs = ["my-config"] + google_cloud_storage { + source = "gs://example-bucket/dir/configs/*" + path = "skaffold.yaml" + } + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_git_skaffold_modules.tf.erb b/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_git_skaffold_modules.tf.erb new file mode 100644 index 000000000000..c5955e2258a0 --- /dev/null +++ b/mmv1/templates/terraform/examples/clouddeploy_custom_target_type_git_skaffold_modules.tf.erb @@ -0,0 +1,17 @@ +resource "google_clouddeploy_custom_target_type" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + name = "<%= ctx[:vars]['custom_target_type_name'] %>" + description = "My custom target type" + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + include_skaffold_modules { + configs = ["my-config"] + git { + repo = "http://github.com/example/example-repo.git" + path = "configs/skaffold.yaml" + ref = "main" + } + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/firebase_app_check_app_attest_config_full.tf.erb b/mmv1/templates/terraform/examples/firebase_app_check_app_attest_config_full.tf.erb new file mode 100644 index 000000000000..134ecacfc5f8 --- /dev/null +++ b/mmv1/templates/terraform/examples/firebase_app_check_app_attest_config_full.tf.erb @@ -0,0 +1,28 @@ +resource "google_firebase_apple_app" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + display_name = "Apple app" + bundle_id = "<%= 
ctx[:vars]['bundle_id'] %>" + team_id = "<%= ctx[:vars]['team_id'] %>" +} + +# It takes a while for App Check to recognize the new app +# If your app already exists, you don't have to wait 30 seconds. +resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_apple_app.default] + create_duration = "30s" +} + +resource "google_firebase_app_check_app_attest_config" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + app_id = google_firebase_apple_app.default.app_id + token_ttl = "<%= ctx[:vars]['token_ttl'] %>" + + depends_on = [time_sleep.wait_30s] + + lifecycle { + precondition { + condition = google_firebase_apple_app.default.team_id != "" + error_message = "Provide a Team ID on the Apple App to use App Check" + } + } +} diff --git a/mmv1/templates/terraform/examples/firebase_app_check_app_attest_config_minimal.tf.erb b/mmv1/templates/terraform/examples/firebase_app_check_app_attest_config_minimal.tf.erb new file mode 100644 index 000000000000..a25a7f2d698f --- /dev/null +++ b/mmv1/templates/terraform/examples/firebase_app_check_app_attest_config_minimal.tf.erb @@ -0,0 +1,27 @@ +resource "google_firebase_apple_app" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + display_name = "Apple app" + bundle_id = "<%= ctx[:vars]['bundle_id'] %>" + team_id = "<%= ctx[:vars]['team_id'] %>" +} + +# It takes a while for App Check to recognize the new app +# If your app already exists, you don't have to wait 30 seconds. 
+resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_apple_app.default] + create_duration = "30s" +} + +resource "google_firebase_app_check_app_attest_config" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + app_id = google_firebase_apple_app.default.app_id + + depends_on = [time_sleep.wait_30s] + + lifecycle { + precondition { + condition = google_firebase_apple_app.default.team_id != "" + error_message = "Provide a Team ID on the Apple App to use App Check" + } + } +} diff --git a/mmv1/templates/terraform/examples/firebase_app_check_play_integrity_config_full.tf.erb b/mmv1/templates/terraform/examples/firebase_app_check_play_integrity_config_full.tf.erb new file mode 100644 index 000000000000..78559763e319 --- /dev/null +++ b/mmv1/templates/terraform/examples/firebase_app_check_play_integrity_config_full.tf.erb @@ -0,0 +1,29 @@ +resource "google_firebase_android_app" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + display_name = "Play Integrity app" + package_name = "<%= ctx[:vars]['package_name'] %>" + sha1_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21c"] + sha256_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21ca1b2c3d4e5f6123456789abc"] +} + +# It takes a while for App Check to recognize the new app +# If your app already exists, you don't have to wait 30 seconds. 
+resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_android_app.default] + create_duration = "30s" +} + +resource "google_firebase_app_check_play_integrity_config" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + app_id = google_firebase_android_app.default.app_id + token_ttl = "<%= ctx[:vars]['token_ttl'] %>" + + depends_on = [time_sleep.wait_30s] + + lifecycle { + precondition { + condition = length(google_firebase_android_app.default.sha256_hashes) > 0 + error_message = "Provide a SHA-256 certificate on the Android App to use App Check" + } + } +} diff --git a/mmv1/templates/terraform/examples/firebase_app_check_play_integrity_config_minimal.tf.erb b/mmv1/templates/terraform/examples/firebase_app_check_play_integrity_config_minimal.tf.erb new file mode 100644 index 000000000000..4aacde367414 --- /dev/null +++ b/mmv1/templates/terraform/examples/firebase_app_check_play_integrity_config_minimal.tf.erb @@ -0,0 +1,28 @@ +resource "google_firebase_android_app" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + display_name = "Play Integrity app" + package_name = "<%= ctx[:vars]['package_name'] %>" + sha1_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21c"] + sha256_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21ca1b2c3d4e5f6123456789abc"] +} + +# It takes a while for App Check to recognize the new app +# If your app already exists, you don't have to wait 30 seconds. 
+resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_android_app.default] + create_duration = "30s" +} + +resource "google_firebase_app_check_play_integrity_config" "default" { + project = "<%= ctx[:test_env_vars]['project_id'] %>" + app_id = google_firebase_android_app.default.app_id + + depends_on = [time_sleep.wait_30s] + + lifecycle { + precondition { + condition = length(google_firebase_android_app.default.sha256_hashes) > 0 + error_message = "Provide a SHA-256 certificate on the Android App to use App Check" + } + } +} diff --git a/mmv1/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb b/mmv1/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb index 42460e90eec3..a75931fd02ae 100644 --- a/mmv1/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb +++ b/mmv1/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb @@ -1,6 +1,7 @@ resource "google_healthcare_hl7_v2_store" "store" { name = "<%= ctx[:vars]['hl7_v2_store_name'] %>" dataset = google_healthcare_dataset.dataset.id + reject_duplicate_message = true notification_configs { pubsub_topic = google_pubsub_topic.topic.id diff --git a/mmv1/templates/terraform/examples/iap_app_engine_service.tf.erb b/mmv1/templates/terraform/examples/iap_app_engine_service.tf.erb index 6727559e4325..37f79449f02b 100644 --- a/mmv1/templates/terraform/examples/iap_app_engine_service.tf.erb +++ b/mmv1/templates/terraform/examples/iap_app_engine_service.tf.erb @@ -39,6 +39,10 @@ resource "google_app_engine_standard_app_version" "version" { runtime = "nodejs10" noop_on_destroy = true + // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. 
+ basic_scaling { + max_instances = 5 + } entrypoint { shell = "node ./app.js" } diff --git a/mmv1/templates/terraform/examples/looker_instance_custom_domain.tf.erb b/mmv1/templates/terraform/examples/looker_instance_custom_domain.tf.erb new file mode 100644 index 000000000000..1a69e027ad91 --- /dev/null +++ b/mmv1/templates/terraform/examples/looker_instance_custom_domain.tf.erb @@ -0,0 +1,12 @@ +resource "google_looker_instance" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]["instance_name"] %>" + platform_edition = "LOOKER_CORE_STANDARD" + region = "us-central1" + oauth_config { + client_id = "<%= ctx[:vars]["client_id"] %>" + client_secret = "<%= ctx[:vars]["client_secret"] %>" + } + custom_domain { + domain = "<%= ctx[:vars]["custom_domain"] %>.com" + } +} diff --git a/mmv1/templates/terraform/examples/volume_basic.tf.erb b/mmv1/templates/terraform/examples/netapp_volume_basic.tf.erb similarity index 100% rename from mmv1/templates/terraform/examples/volume_basic.tf.erb rename to mmv1/templates/terraform/examples/netapp_volume_basic.tf.erb diff --git a/mmv1/templates/terraform/examples/network_security_security_profile_group_basic.tf.erb b/mmv1/templates/terraform/examples/network_security_security_profile_group_basic.tf.erb new file mode 100644 index 000000000000..bfbca6c2c107 --- /dev/null +++ b/mmv1/templates/terraform/examples/network_security_security_profile_group_basic.tf.erb @@ -0,0 +1,19 @@ +resource "google_network_security_security_profile_group" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['security_profile_group_name'] %>" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + description = "my description" + threat_prevention_profile = google_network_security_security_profile.security_profile.id + + labels = { + foo = "bar" + } +} + +resource "google_network_security_security_profile" "security_profile" { + provider = google-beta + name = "<%= ctx[:vars]['security_profile_name'] 
%>" + type = "THREAT_PREVENTION" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + location = "global" +} diff --git a/mmv1/templates/terraform/examples/notebook_instance_basic_stopped.tf.erb b/mmv1/templates/terraform/examples/notebook_instance_basic_stopped.tf.erb new file mode 100644 index 000000000000..41130bcca3ef --- /dev/null +++ b/mmv1/templates/terraform/examples/notebook_instance_basic_stopped.tf.erb @@ -0,0 +1,10 @@ +resource "google_notebooks_instance" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]["instance_name"] %>" + location = "us-west1-a" + machine_type = "e2-medium" + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } + desired_state = "STOPPED" +} diff --git a/mmv1/templates/terraform/examples/notebook_instance_full.tf.erb b/mmv1/templates/terraform/examples/notebook_instance_full.tf.erb index 0137529b0d1a..31f2af55ca3c 100644 --- a/mmv1/templates/terraform/examples/notebook_instance_full.tf.erb +++ b/mmv1/templates/terraform/examples/notebook_instance_full.tf.erb @@ -36,6 +36,7 @@ resource "google_notebooks_instance" "<%= ctx[:primary_resource_id] %>" { ] disk_encryption = "CMEK" kms_key = "<%= ctx[:vars]['key_name'] %>" + desired_state = "ACTIVE" } data "google_compute_network" "my_network" { diff --git a/mmv1/templates/terraform/examples/preference_set_basic.tf.erb b/mmv1/templates/terraform/examples/preference_set_basic.tf.erb new file mode 100644 index 000000000000..ebe5c10263fa --- /dev/null +++ b/mmv1/templates/terraform/examples/preference_set_basic.tf.erb @@ -0,0 +1,13 @@ +resource "google_migration_center_preference_set" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + preference_set_id = "<%= ctx[:vars]['set_name'] %>" + description = "Terraform integration test description" + display_name = "Terraform integration test display" + virtual_machine_preferences { + vmware_engine_preferences { + cpu_overcommit_ratio = 1.5 + } + 
sizing_optimization_strategy = "SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE" + target_product = "COMPUTE_MIGRATION_TARGET_PRODUCT_COMPUTE_ENGINE" + } +} diff --git a/mmv1/templates/terraform/examples/preference_set_full.tf.erb b/mmv1/templates/terraform/examples/preference_set_full.tf.erb new file mode 100644 index 000000000000..61acf00f5540 --- /dev/null +++ b/mmv1/templates/terraform/examples/preference_set_full.tf.erb @@ -0,0 +1,35 @@ +resource "google_migration_center_preference_set" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + preference_set_id = "<%= ctx[:vars]['set_name'] %>" + description = "Terraform integration test description" + display_name = "Terraform integration test display" + virtual_machine_preferences { + vmware_engine_preferences { + cpu_overcommit_ratio = 1.5 + storage_deduplication_compression_ratio = 1.3 + commitment_plan = "ON_DEMAND" + } + sizing_optimization_strategy = "SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE" + target_product = "COMPUTE_MIGRATION_TARGET_PRODUCT_COMPUTE_ENGINE" + commitment_plan = "COMMITMENT_PLAN_ONE_YEAR" + region_preferences { + preferred_regions = ["us-central1"] + } + sole_tenancy_preferences { + commitment_plan = "ON_DEMAND" + cpu_overcommit_ratio = 1.2 + host_maintenance_policy = "HOST_MAINTENANCE_POLICY_DEFAULT" + node_types { + node_name = "tf-test" + } + } + compute_engine_preferences { + license_type = "LICENSE_TYPE_BRING_YOUR_OWN_LICENSE" + machine_preferences { + allowed_machine_series { + code = "C3" + } + } + } + } +} diff --git a/mmv1/templates/terraform/examples/scc_event_threat_detection_custom_module.tf.erb b/mmv1/templates/terraform/examples/scc_event_threat_detection_custom_module.tf.erb index 34bb64ac066f..8f7c08b49f8c 100644 --- a/mmv1/templates/terraform/examples/scc_event_threat_detection_custom_module.tf.erb +++ b/mmv1/templates/terraform/examples/scc_event_threat_detection_custom_module.tf.erb @@ -4,7 +4,7 @@ resource "google_scc_event_threat_detection_custom_module" 
"<%= ctx[:primary_res enablement_state = "ENABLED" type = "<%= ctx[:vars]['type'] %>" description = "My Event Threat Detection Custom Module" - cofig = jsonencode({ + config = jsonencode({ "metadata": { "severity": "LOW", "description": "Flagged by Forcepoint as malicious", diff --git a/mmv1/templates/terraform/examples/securityposture_posture_basic.tf.erb b/mmv1/templates/terraform/examples/securityposture_posture_basic.tf.erb index 5d865222aa4e..9101034c48c2 100644 --- a/mmv1/templates/terraform/examples/securityposture_posture_basic.tf.erb +++ b/mmv1/templates/terraform/examples/securityposture_posture_basic.tf.erb @@ -14,6 +14,11 @@ resource "google_securityposture_posture" "<%= ctx[:primary_resource_id] %>"{ canned_constraint_id = "storage.uniformBucketLevelAccess" policy_rules { enforce = true + condition { + description = "condition description" + expression = "resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')" + title = "a CEL condition" + } } } } @@ -33,6 +38,11 @@ resource "google_securityposture_posture" "<%= ctx[:primary_resource_id] %>"{ } policy_rules { enforce = true + condition { + description = "condition description" + expression = "resource.matchTagId('tagKeys/key_id','tagValues/value_id')" + title = "a CEL condition" + } } } } diff --git a/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_feature_registry.tf.erb b/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_feature_registry.tf.erb new file mode 100644 index 000000000000..e82ea2a0c104 --- /dev/null +++ b/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_feature_registry.tf.erb @@ -0,0 +1,93 @@ +resource "google_vertex_ai_feature_online_store" "featureonlinestore" { + name = "<%= ctx[:vars]['name'] %>" + labels = { + foo = "bar" + } + region = "us-central1" + bigtable { + auto_scaling { + min_node_count = 1 + max_node_count = 2 + cpu_utilization_target = 80 + } + } +} + +resource 
"google_bigquery_dataset" "sample_dataset" { + dataset_id = "<%= ctx[:vars]['name'] %>" + friendly_name = "test" + description = "This is a test description" + location = "US" +} + +resource "google_bigquery_table" "sample_table" { + deletion_protection = false + dataset_id = google_bigquery_dataset.sample_dataset.dataset_id + table_id = "<%= ctx[:vars]['name'] %>" + + schema = <", + "type": "STRING", + "mode": "NULLABLE" + }, + { + "name": "feature_timestamp", + "type": "TIMESTAMP", + "mode": "NULLABLE" + } +] +EOF +} + +resource "google_vertex_ai_feature_group" "sample_feature_group" { + name = "<%= ctx[:vars]['name'] %>" + description = "A sample feature group" + region = "us-central1" + labels = { + label-one = "value-one" + } + big_query { + big_query_source { + # The source table must have a column named 'feature_timestamp' of type TIMESTAMP. + input_uri = "bq://${google_bigquery_table.sample_table.project}.${google_bigquery_table.sample_table.dataset_id}.${google_bigquery_table.sample_table.table_id}" + } + entity_id_columns = ["feature_id"] + } +} + + + +resource "google_vertex_ai_feature_group_feature" "sample_feature" { + name = "<%= ctx[:vars]['name'] %>" + region = "us-central1" + feature_group = google_vertex_ai_feature_group.sample_feature_group.name + description = "A sample feature" + labels = { + label-one = "value-one" + } +} + + +resource "google_vertex_ai_feature_online_store_featureview" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + region = "us-central1" + feature_online_store = google_vertex_ai_feature_online_store.featureonlinestore.name + sync_config { + cron = "0 0 * * *" + } + feature_registry_source { + + feature_groups { + feature_group_id = google_vertex_ai_feature_group.sample_feature_group.name + feature_ids = [google_vertex_ai_feature_group_feature.sample_feature.name] + } + } +} + diff --git a/mmv1/templates/terraform/examples/workbench_instance_full.tf.erb 
b/mmv1/templates/terraform/examples/workbench_instance_full.tf.erb index 2ec579cba8dc..dd1122901e18 100644 --- a/mmv1/templates/terraform/examples/workbench_instance_full.tf.erb +++ b/mmv1/templates/terraform/examples/workbench_instance_full.tf.erb @@ -21,6 +21,12 @@ resource "google_workbench_instance" "<%= ctx[:primary_resource_id] %>" { core_count = 1 } + shielded_instance_config { + enable_secure_boot = true + enable_vtpm = true + enable_integrity_monitoring = true + } + disable_public_ip = false service_accounts { @@ -30,14 +36,14 @@ resource "google_workbench_instance" "<%= ctx[:primary_resource_id] %>" { boot_disk { disk_size_gb = 310 disk_type = "PD_SSD" - disk_encryption = "GMEK" + disk_encryption = "CMEK" kms_key = "<%= ctx[:vars]['key_name'] %>" } data_disks { disk_size_gb = 330 disk_type = "PD_SSD" - disk_encryption = "GMEK" + disk_encryption = "CMEK" kms_key = "<%= ctx[:vars]['key_name'] %>" } @@ -65,4 +71,6 @@ resource "google_workbench_instance" "<%= ctx[:primary_resource_id] %>" { k = "val" } + desired_state = "ACTIVE" + } diff --git a/mmv1/templates/terraform/examples/workbench_instance_labels.tf.erb b/mmv1/templates/terraform/examples/workbench_instance_labels_stopped.tf.erb similarity index 72% rename from mmv1/templates/terraform/examples/workbench_instance_labels.tf.erb rename to mmv1/templates/terraform/examples/workbench_instance_labels_stopped.tf.erb index 3a0d9afda853..151053d0ef49 100644 --- a/mmv1/templates/terraform/examples/workbench_instance_labels.tf.erb +++ b/mmv1/templates/terraform/examples/workbench_instance_labels_stopped.tf.erb @@ -5,6 +5,12 @@ resource "google_workbench_instance" "<%= ctx[:primary_resource_id] %>" { gce_setup { machine_type = "e2-standard-4" + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = false + enable_integrity_monitoring = false + } + service_accounts { email = "<%= ctx[:test_env_vars]["service_account"] %>" } @@ -21,4 +27,6 @@ resource "google_workbench_instance" "<%= 
ctx[:primary_resource_id] %>" { k = "val" } + desired_state = "STOPPED" + } diff --git a/mmv1/templates/terraform/post_create/notebooks_instance.go.erb b/mmv1/templates/terraform/post_create/notebooks_instance.go.erb new file mode 100644 index 000000000000..a70d7cb313a5 --- /dev/null +++ b/mmv1/templates/terraform/post_create/notebooks_instance.go.erb @@ -0,0 +1,13 @@ +if err := waitForNotebooksInstanceActive(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { + return fmt.Errorf("Notebook instance %q did not reach ACTIVE state: %q", d.Get("name").(string), err) +} + +if p, ok := d.GetOk("desired_state"); ok && p.(string) == "STOPPED" { + dRes, err := modifyNotebooksInstanceState(config, d, project, billingProject, userAgent, "stop") + if err != nil { + return err + } + if err := waitForNotebooksOperation(config, d, project, billingProject, userAgent, dRes); err != nil { + return fmt.Errorf("Error stopping Notebook Instance: %s", err) + } +} diff --git a/mmv1/templates/terraform/post_create/workbench_instance.go.erb b/mmv1/templates/terraform/post_create/workbench_instance.go.erb index e3a968e4a6b2..25128f95f821 100644 --- a/mmv1/templates/terraform/post_create/workbench_instance.go.erb +++ b/mmv1/templates/terraform/post_create/workbench_instance.go.erb @@ -1,3 +1,13 @@ if err := waitForWorkbenchInstanceActive(d, config, d.Timeout(schema.TimeoutCreate) - time.Minute); err != nil { return fmt.Errorf("Workbench instance %q did not reach ACTIVE state: %q", d.Get("name").(string), err) } + +if p, ok := d.GetOk("desired_state"); ok && p.(string) == "STOPPED" { + dRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, "stop") + if err != nil { + return err + } + if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, dRes); err != nil { + return fmt.Errorf("Error stopping Workbench Instance: %s", err) + } +} diff --git a/mmv1/templates/terraform/post_update/notebooks_instance.go.erb 
b/mmv1/templates/terraform/post_update/notebooks_instance.go.erb new file mode 100644 index 000000000000..819cff2d1dd2 --- /dev/null +++ b/mmv1/templates/terraform/post_update/notebooks_instance.go.erb @@ -0,0 +1,21 @@ +name := d.Get("name").(string) +state := d.Get("state").(string) +desired_state := d.Get("desired_state").(string) + +if state != desired_state { + verb := "start" + if desired_state == "STOPPED" { + verb = "stop" + } + pRes, err := modifyNotebooksInstanceState(config, d, project, billingProject, userAgent, verb) + if err != nil { + return err + } + + if err := waitForNotebooksOperation(config, d, project, billingProject, userAgent, pRes); err != nil { + return fmt.Errorf("Error waiting to modify Notebook Instance state: %s", err) + } + +} else { + log.Printf("[DEBUG] Notebook Instance %q has state %q.", name, state) +} diff --git a/mmv1/templates/terraform/post_update/workbench_instance.go.erb b/mmv1/templates/terraform/post_update/workbench_instance.go.erb index fa994a3aacb4..cd216f018387 100644 --- a/mmv1/templates/terraform/post_update/workbench_instance.go.erb +++ b/mmv1/templates/terraform/post_update/workbench_instance.go.erb @@ -1,33 +1,18 @@ state := d.Get("state").(string) +desired_state := d.Get("desired_state").(string) -if state != "ACTIVE" { - startURL, err := tpgresource.ReplaceVars(d, config, "{{WorkbenchBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:start") - if err != nil { - return err +if state != desired_state { + verb := "start" + if desired_state == "STOPPED" { + verb = "stop" } - - log.Printf("[DEBUG] Starting Workbench Instance: %q", name) - - emptyReqBody := make(map[string]interface{}) - - pRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "POST", - Project: billingProject, - RawURL: startURL, - UserAgent: userAgent, - Body: emptyReqBody, - }) + pRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, verb) if err != 
nil { - return fmt.Errorf("Error Starting Workbench Instance: %s", err) + return err } - var opResp map[string]interface{} - err = WorkbenchOperationWaitTimeWithResponse( - config, pRes, &opResp, project, "Starting Workbench Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error waiting to start Workbench Instance: %s", err) + if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, pRes); err != nil { + return fmt.Errorf("Error waiting to modify Workbench Instance state: %s", err) } } else { diff --git a/mmv1/templates/terraform/pre_update/workbench_instance.go.erb b/mmv1/templates/terraform/pre_update/workbench_instance.go.erb index 2b33205942a7..847a0bcd1311 100644 --- a/mmv1/templates/terraform/pre_update/workbench_instance.go.erb +++ b/mmv1/templates/terraform/pre_update/workbench_instance.go.erb @@ -1,34 +1,15 @@ name := d.Get("name").(string) -if d.HasChange("gce_setup.0.machine_type") || d.HasChange("gce_setup.0.accelerator_configs") { +if d.HasChange("gce_setup.0.machine_type") || d.HasChange("gce_setup.0.accelerator_configs") || d.HasChange("gce_setup.0.shielded_instance_config"){ state := d.Get("state").(string) - if state != "STOPPED" { - stopURL, err := tpgresource.ReplaceVars(d, config, "{{WorkbenchBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:stop") - if err != nil { - return err - } - - log.Printf("[DEBUG] Stopping Workbench Instance: %q", name) - - emptyReqBody := make(map[string]interface{}) - dRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "POST", - Project: billingProject, - RawURL: stopURL, - UserAgent: userAgent, - Body: emptyReqBody, - }) + if state != "STOPPED" { + dRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, "stop") if err != nil { - return fmt.Errorf("Error Stopping Workbench Instance: %s", err) + return err } - var opRes map[string]interface{} - 
err = WorkbenchOperationWaitTimeWithResponse( - config, dRes, &opRes, project, "Stopping Workbench Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error waiting to stop Workbench Instance: %s", err) + if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, dRes); err != nil { + return fmt.Errorf("Error stopping Workbench Instance: %s", err) } } else { @@ -47,6 +28,9 @@ if d.HasChange("gce_setup.0.machine_type") { if d.HasChange("gce_setup.0.accelerator_configs") { newUpdateMask = append(newUpdateMask, "gce_setup.accelerator_configs") } +if d.HasChange("gce_setup.0.shielded_instance_config") { + newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config") +} if d.HasChange("gce_setup.0.metadata") { newUpdateMask = append(newUpdateMask, "gceSetup.metadata") } diff --git a/mmv1/third_party/cai2hcl/converter_map.go b/mmv1/third_party/cai2hcl/converter_map.go index ecd304e54eb6..4990368e4f10 100644 --- a/mmv1/third_party/cai2hcl/converter_map.go +++ b/mmv1/third_party/cai2hcl/converter_map.go @@ -15,7 +15,8 @@ var AssetTypeToConverter = map[string]string{ compute.ComputeInstanceAssetType: "google_compute_instance", compute.ComputeForwardingRuleAssetType: "google_compute_forwarding_rule", - compute.ComputeBackendServiceAssetType: "google_compute_backend_service", + compute.ComputeBackendServiceAssetType: "google_compute_backend_service", + compute.ComputeRegionBackendServiceAssetType: "google_compute_region_backend_service", resourcemanager.ProjectAssetType: "google_project", resourcemanager.ProjectBillingAssetType: "google_project", @@ -25,7 +26,9 @@ var AssetTypeToConverter = map[string]string{ var ConverterMap = map[string]common.Converter{ "google_compute_instance": compute.NewComputeInstanceConverter(provider), "google_compute_forwarding_rule": compute.NewComputeForwardingRuleConverter(provider), - "google_compute_backend_service": 
compute.NewComputeBackendServiceConverter(provider), + + "google_compute_backend_service": compute.NewComputeBackendServiceConverter(provider), + "google_compute_region_backend_service": compute.NewComputeRegionBackendServiceConverter(provider), "google_project": resourcemanager.NewProjectConverter(provider), } diff --git a/mmv1/third_party/cai2hcl/services/compute/compute_region_backend_service.go b/mmv1/third_party/cai2hcl/services/compute/compute_region_backend_service.go new file mode 100644 index 000000000000..6112e1773412 --- /dev/null +++ b/mmv1/third_party/cai2hcl/services/compute/compute_region_backend_service.go @@ -0,0 +1,1398 @@ +package compute + +import ( + "context" + "fmt" + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/cai2hcl/common" + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/caiasset" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" + "google.golang.org/api/googleapi" +) + +// Fields in "backends" that are not allowed for non-managed backend services +// (loadBalancingScheme) - the API returns an error if they are set at all +// in the request. +var backendServiceOnlyManagedFieldNames = []string{ + "capacity_scaler", + "max_connections", + "max_connections_per_instance", + "max_connections_per_endpoint", + "max_rate", + "max_rate_per_instance", + "max_rate_per_endpoint", + "max_utilization", +} + +// validateManagedBackendServiceBackends ensures capacity_scaler is set for each backend in a managed +// backend service. To prevent a permadiff, we decided to override the API behavior and require the +// capacity_scaler value in this case. 
+// +// The API: +// - requires the sum of the backends' capacity_scalers be > 0 +// - defaults to 1 if capacity_scaler is omitted from the request +// +// However, the schema.Set Hash function defaults to 0 if not given, which we chose because it's the default +// float and because non-managed backends can't have the value set, so there will be a permadiff for a +// situational non-zero default returned from the API. We can't diff suppress or customdiff a +// field inside a set object in ResourceDiff, since the value also determines the hash for that set object. +func validateManagedBackendServiceBackends(backends []interface{}, d *schema.ResourceDiff) error { + sum := 0.0 + + for _, b := range backends { + if b == nil { + continue + } + backend := b.(map[string]interface{}) + if v, ok := backend["capacity_scaler"]; ok && v != nil { + sum += v.(float64) + } else { + return fmt.Errorf("capacity_scaler is required for each backend in managed backend service") + } + } + if sum == 0.0 { + return fmt.Errorf("managed backend service must have at least one non-zero capacity_scaler for backends") + } + return nil +} + +// If INTERNAL or EXTERNAL, make sure the user did not provide values for any of the fields that cannot be sent. +// We ignore these values regardless when sent to the API, but this adds plan-time validation if a +// user sets the value to non-zero. 
We can't validate for empty but set because +// of how the SDK handles set objects (on any read, nil fields will get set to empty values) +func validateNonManagedBackendServiceBackends(backends []interface{}, d *schema.ResourceDiff) error { + for _, b := range backends { + if b == nil { + continue + } + backend := b.(map[string]interface{}) + for _, fn := range backendServiceOnlyManagedFieldNames { + if v, ok := backend[fn]; ok && !tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return fmt.Errorf("%q cannot be set for non-managed backend service, found value %v", fn, v) + } + } + } + return nil +} + +func customDiffRegionBackendService(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + v, ok := d.GetOk("backend") + if !ok { + return nil + } + if v == nil { + return nil + } + + backends := v.(*schema.Set).List() + if len(backends) == 0 { + return nil + } + + switch d.Get("load_balancing_scheme").(string) { + case "INTERNAL", "EXTERNAL": + return validateNonManagedBackendServiceBackends(backends, d) + case "INTERNAL_MANAGED": + return nil + default: + return validateManagedBackendServiceBackends(backends, d) + } +} + +// ComputeRegionBackendServiceAssetType is the CAI asset type name. +const ComputeRegionBackendServiceAssetType string = "compute.googleapis.com/RegionBackendService" + +// ComputeRegionBackendServiceSchemaName is a TF resource schema name. +const ComputeRegionBackendServiceSchemaName string = "google_compute_region_backend_service" + +type ComputeRegionBackendServiceConverter struct { + name string + schema map[string]*schema.Schema +} + +// NewComputeRegionBackendServiceConverter returns an HCL converter for compute backend service. 
+func NewComputeRegionBackendServiceConverter(provider *schema.Provider) common.Converter { + schema := provider.ResourcesMap[ComputeRegionBackendServiceSchemaName].Schema + + return &ComputeRegionBackendServiceConverter{ + name: ComputeRegionBackendServiceSchemaName, + schema: schema, + } +} + +func (c *ComputeRegionBackendServiceConverter) Convert(assets []*caiasset.Asset) ([]*common.HCLResourceBlock, error) { + var blocks []*common.HCLResourceBlock + config := common.NewConfig() + + for _, asset := range assets { + if asset == nil { + continue + } + if asset.Resource != nil && asset.Resource.Data != nil { + block, err := c.convertResourceData(asset, config) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + } + return blocks, nil +} + +func (c *ComputeRegionBackendServiceConverter) convertResourceData(asset *caiasset.Asset, config *transport_tpg.Config) (*common.HCLResourceBlock, error) { + if asset == nil || asset.Resource == nil || asset.Resource.Data == nil { + return nil, fmt.Errorf("asset resource data is nil") + } + + assetResourceData := asset.Resource.Data + + hcl, _ := resourceComputeRegionBackendServiceRead(assetResourceData, config) + + ctyVal, err := common.MapToCtyValWithSchema(hcl, c.schema) + if err != nil { + return nil, err + } + + resourceName := assetResourceData["name"].(string) + + return &common.HCLResourceBlock{ + Labels: []string{c.name, resourceName}, + Value: ctyVal, + }, nil +} + +func resourceComputeRegionBackendServiceRead(resource map[string]interface{}, config *transport_tpg.Config) (map[string]interface{}, error) { + result := make(map[string]interface{}) + var resource_data *schema.ResourceData = nil + + result["affinity_cookie_ttl_sec"] = flattenComputeRegionBackendServiceAffinityCookieTtlSec(resource["affinityCookieTtlSec"], resource_data, config) + result["backend"] = flattenComputeRegionBackendServiceBackend(resource["backends"], resource_data, config) + result["circuit_breakers"] = 
flattenComputeRegionBackendServiceCircuitBreakers(resource["circuitBreakers"], resource_data, config) + result["consistent_hash"] = flattenComputeRegionBackendServiceConsistentHash(resource["consistentHash"], resource_data, config) + result["cdn_policy"] = flattenComputeRegionBackendServiceCdnPolicy(resource["cdnPolicy"], resource_data, config) + if flattenedProp := flattenComputeRegionBackendServiceConnectionDraining(resource["connectionDraining"], resource_data, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return nil, fmt.Errorf("Error reading RegionBackendService: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + result[k] = v + } + } + } + result["creation_timestamp"] = flattenComputeRegionBackendServiceCreationTimestamp(resource["creationTimestamp"], resource_data, config) + result["description"] = flattenComputeRegionBackendServiceDescription(resource["description"], resource_data, config) + result["failover_policy"] = flattenComputeRegionBackendServiceFailoverPolicy(resource["failoverPolicy"], resource_data, config) + result["enable_cdn"] = flattenComputeRegionBackendServiceEnableCDN(resource["enableCDN"], resource_data, config) + result["fingerprint"] = flattenComputeRegionBackendServiceFingerprint(resource["fingerprint"], resource_data, config) + result["health_checks"] = flattenComputeRegionBackendServiceHealthChecks(resource["healthChecks"], resource_data, config) + result["iap"] = flattenComputeRegionBackendServiceIap(resource["iap"], resource_data, config) + result["load_balancing_scheme"] = flattenComputeRegionBackendServiceLoadBalancingScheme(resource["loadBalancingScheme"], resource_data, config) + result["locality_lb_policy"] = flattenComputeRegionBackendServiceLocalityLbPolicy(resource["localityLbPolicy"], resource_data, config) + result["name"] = flattenComputeRegionBackendServiceName(resource["name"], resource_data, 
config) + result["outlier_detection"] = flattenComputeRegionBackendServiceOutlierDetection(resource["outlierDetection"], resource_data, config) + result["port_name"] = flattenComputeRegionBackendServicePortName(resource["portName"], resource_data, config) + result["protocol"] = flattenComputeRegionBackendServiceProtocol(resource["protocol"], resource_data, config) + result["security_policy"] = flattenComputeRegionBackendServiceSecurityPolicy(resource["securityPolicy"], resource_data, config) + result["session_affinity"] = flattenComputeRegionBackendServiceSessionAffinity(resource["sessionAffinity"], resource_data, config) + result["connection_tracking_policy"] = flattenComputeRegionBackendServiceConnectionTrackingPolicy(resource["connectionTrackingPolicy"], resource_data, config) + result["timeout_sec"] = flattenComputeRegionBackendServiceTimeoutSec(resource["timeoutSec"], resource_data, config) + result["log_config"] = flattenComputeRegionBackendServiceLogConfig(resource["logConfig"], resource_data, config) + result["network"] = flattenComputeRegionBackendServiceNetwork(resource["network"], resource_data, config) + result["subsetting"] = flattenComputeRegionBackendServiceSubsetting(resource["subsetting"], resource_data, config) + result["region"] = flattenComputeRegionBackendServiceRegion(resource["region"], resource_data, config) + + return result, nil +} + +func flattenComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceBackend(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(resourceGoogleComputeBackendServiceBackendHash, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "balancing_mode": flattenComputeRegionBackendServiceBackendBalancingMode(original["balancingMode"], d, config), + "capacity_scaler": flattenComputeRegionBackendServiceBackendCapacityScaler(original["capacityScaler"], d, config), + "description": flattenComputeRegionBackendServiceBackendDescription(original["description"], d, config), + "failover": flattenComputeRegionBackendServiceBackendFailover(original["failover"], d, config), + "group": flattenComputeRegionBackendServiceBackendGroup(original["group"], d, config), + "max_connections": flattenComputeRegionBackendServiceBackendMaxConnections(original["maxConnections"], d, config), + "max_connections_per_instance": flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(original["maxConnectionsPerInstance"], d, config), + "max_connections_per_endpoint": flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(original["maxConnectionsPerEndpoint"], d, config), + "max_rate": flattenComputeRegionBackendServiceBackendMaxRate(original["maxRate"], d, config), + "max_rate_per_instance": flattenComputeRegionBackendServiceBackendMaxRatePerInstance(original["maxRatePerInstance"], d, config), + "max_rate_per_endpoint": flattenComputeRegionBackendServiceBackendMaxRatePerEndpoint(original["maxRatePerEndpoint"], d, config), + "max_utilization": flattenComputeRegionBackendServiceBackendMaxUtilization(original["maxUtilization"], d, config), + }) + } + return transformed +} +func flattenComputeRegionBackendServiceBackendBalancingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + return v +} + +func flattenComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceBackendDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceBackendFailover(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceBackendGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionBackendServiceBackendMaxConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceBackendMaxRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceCircuitBreakers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["connect_timeout"] = + flattenComputeRegionBackendServiceCircuitBreakersConnectTimeout(original["connectTimeout"], d, config) + transformed["max_requests_per_connection"] = + flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(original["maxRequestsPerConnection"], d, config) + transformed["max_connections"] = + 
flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(original["maxConnections"], d, config) + transformed["max_pending_requests"] = + flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(original["maxPendingRequests"], d, config) + transformed["max_requests"] = + flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(original["maxRequests"], d, config) + transformed["max_retries"] = + flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(original["maxRetries"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceCircuitBreakersConnectTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeRegionBackendServiceCircuitBreakersConnectTimeoutSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenComputeRegionBackendServiceCircuitBreakersConnectTimeoutNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceCircuitBreakersConnectTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCircuitBreakersConnectTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + 
return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { 
+ if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceConsistentHash(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["http_cookie"] = + flattenComputeRegionBackendServiceConsistentHashHttpCookie(original["httpCookie"], d, config) + transformed["http_header_name"] = + flattenComputeRegionBackendServiceConsistentHashHttpHeaderName(original["httpHeaderName"], d, config) + transformed["minimum_ring_size"] = + flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(original["minimumRingSize"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ttl"] = + flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) + transformed["name"] = + 
flattenComputeRegionBackendServiceConsistentHashHttpCookieName(original["name"], d, config) + transformed["path"] = + flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cache_key_policy"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(original["cacheKeyPolicy"], d, config) + transformed["signed_url_cache_max_age_sec"] = + flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signedUrlCacheMaxAgeSec"], d, config) + transformed["default_ttl"] = + flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(original["defaultTtl"], d, config) + transformed["max_ttl"] = + flattenComputeRegionBackendServiceCdnPolicyMaxTtl(original["maxTtl"], d, config) + transformed["client_ttl"] = + flattenComputeRegionBackendServiceCdnPolicyClientTtl(original["clientTtl"], d, config) + transformed["negative_caching"] = + 
flattenComputeRegionBackendServiceCdnPolicyNegativeCaching(original["negativeCaching"], d, config) + transformed["negative_caching_policy"] = + flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) + transformed["cache_mode"] = + flattenComputeRegionBackendServiceCdnPolicyCacheMode(original["cacheMode"], d, config) + transformed["serve_while_stale"] = + flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["include_host"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(original["includeHost"], d, config) + transformed["include_protocol"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(original["includeProtocol"], d, config) + transformed["include_query_string"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(original["includeQueryString"], d, config) + transformed["query_string_blacklist"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(original["queryStringBlacklist"], d, config) + transformed["query_string_whitelist"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["queryStringWhitelist"], d, config) + transformed["include_named_cookies"] = + flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(original["includeNamedCookies"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "code": flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config), + "ttl": flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config), 
+ }) + } + return transformed +} +func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceConnectionDraining(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } 
+ transformed := make(map[string]interface{}) + transformed["connection_draining_timeout_sec"] = + flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(original["drainingTimeoutSec"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceFailoverPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["disable_connection_drain_on_failover"] = + flattenComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(original["disableConnectionDrainOnFailover"], d, config) + transformed["drop_traffic_if_unhealthy"] = + flattenComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(original["dropTrafficIfUnhealthy"], d, config) + transformed["failover_ratio"] = + flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(original["failoverRatio"], d, config) + return []interface{}{transformed} +} +func 
flattenComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceEnableCDN(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceHealthChecks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeRegionBackendServiceIap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["oauth2_client_id"] = + flattenComputeRegionBackendServiceIapOauth2ClientId(original["oauth2ClientId"], d, config) + transformed["oauth2_client_secret"] = + flattenComputeRegionBackendServiceIapOauth2ClientSecret(original["oauth2ClientSecret"], d, config) + transformed["oauth2_client_secret_sha256"] = + flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return 
v +} + +func flattenComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("iap.0.oauth2_client_secret") +} + +func flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceOutlierDetection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["base_ejection_time"] = + flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(original["baseEjectionTime"], d, config) + transformed["consecutive_errors"] = + flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(original["consecutiveErrors"], d, config) + transformed["consecutive_gateway_failure"] = + flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutiveGatewayFailure"], d, config) + transformed["enforcing_consecutive_errors"] = + flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcingConsecutiveErrors"], d, config) + transformed["enforcing_consecutive_gateway_failure"] = + flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcingConsecutiveGatewayFailure"], d, config) + 
transformed["enforcing_success_rate"] = + flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcingSuccessRate"], d, config) + transformed["interval"] = + flattenComputeRegionBackendServiceOutlierDetectionInterval(original["interval"], d, config) + transformed["max_ejection_percent"] = + flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(original["maxEjectionPercent"], d, config) + transformed["success_rate_minimum_hosts"] = + flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["successRateMinimumHosts"], d, config) + transformed["success_rate_request_volume"] = + flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(original["successRateRequestVolume"], d, config) + transformed["success_rate_stdev_factor"] = + flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(original["successRateStdevFactor"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, 
ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); 
err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["seconds"] = + flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServicePortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceSecurityPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceSessionAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceConnectionTrackingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["idle_timeout_sec"] = + 
flattenComputeRegionBackendServiceConnectionTrackingPolicyIdleTimeoutSec(original["idleTimeoutSec"], d, config) + transformed["tracking_mode"] = + flattenComputeRegionBackendServiceConnectionTrackingPolicyTrackingMode(original["trackingMode"], d, config) + transformed["connection_persistence_on_unhealthy_backends"] = + flattenComputeRegionBackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends(original["connectionPersistenceOnUnhealthyBackends"], d, config) + transformed["enable_strong_affinity"] = + flattenComputeRegionBackendServiceConnectionTrackingPolicyEnableStrongAffinity(original["enableStrongAffinity"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceConnectionTrackingPolicyIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceConnectionTrackingPolicyTrackingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceConnectionTrackingPolicyEnableStrongAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if 
intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionBackendServiceLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable"] = + flattenComputeRegionBackendServiceLogConfigEnable(original["enable"], d, config) + transformed["sample_rate"] = + flattenComputeRegionBackendServiceLogConfigSampleRate(original["sampleRate"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionBackendServiceNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionBackendServiceSubsetting(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["policy"] = + flattenComputeRegionBackendServiceSubsettingPolicy(original["policy"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionBackendServiceSubsettingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenComputeRegionBackendServiceRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} diff --git a/mmv1/third_party/cai2hcl/services/compute/compute_region_backend_service_test.go b/mmv1/third_party/cai2hcl/services/compute/compute_region_backend_service_test.go new file mode 100644 index 000000000000..ed086e2d7e13 --- /dev/null +++ b/mmv1/third_party/cai2hcl/services/compute/compute_region_backend_service_test.go @@ -0,0 +1,14 @@ +package compute_test + +import ( + "testing" + + cai2hcl_testing "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/cai2hcl/testing" +) + +func TestComputeRegionBackendService(t *testing.T) { + cai2hcl_testing.AssertTestFiles( + t, + "./testdata", + []string{"compute_region_backend_service"}) +} diff --git a/mmv1/third_party/cai2hcl/services/compute/testdata/compute_region_backend_service.json b/mmv1/third_party/cai2hcl/services/compute/testdata/compute_region_backend_service.json new file mode 100644 index 000000000000..20435f078323 --- /dev/null +++ b/mmv1/third_party/cai2hcl/services/compute/testdata/compute_region_backend_service.json @@ -0,0 +1,101 @@ +[ + { + "name": "//compute.googleapis.com/projects/myproj/regions/us-central1/backendServices/bs-1", + "asset_type": "compute.googleapis.com/RegionBackendService", + "ancestry_path": "organizations/123/folders/456/project/myproj", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "RegionBackendService", + "parent": "//cloudresourcemanager.googleapis.com/projects/myproj", + "data": { + "backends": [ + { + "balancingMode": "CONNECTION", + "failover": false, + "group": "projects/myproj/zones/us-central1-a/instanceGroups/ig-1" + } + ], + "connectionDraining": { + "drainingTimeoutSec": 30 + }, + "description": "bs-1 description", + "failoverPolicy": {}, + 
"healthChecks": [ + "projects/myproj/global/healthChecks/hc-1" + ], + "loadBalancingScheme": "INTERNAL", + "logConfig": { + "enable": true, + "optionalMode": "INCLUDE_ALL_OPTIONAL", + "sampleRate": 0.2 + }, + "name": "bs-1", + "network": "projects/myproj/global/networks/default", + "protocol": "TCP", + "region": "projects/myproj/regions/us-central1", + "sessionAffinity": "NONE" + } + } + }, + { + "name": "//compute.googleapis.com/projects/myproj/regions/us-central1/backendServices/bs-2", + "asset_type": "compute.googleapis.com/RegionBackendService", + "ancestry_path": "organizations/123/folders/456/project/myproj", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "RegionBackendService", + "parent": "//cloudresourcemanager.googleapis.com/projects/myproj", + "data": { + "backends": [ + { + "balancingMode": "CONNECTION", + "capacityScaler": 0.1, + "group": "projects/myproj/zones/us-central1-c/networkEndpointGroups/neg-1", + "maxConnections": 2 + } + ], + "circuitBreakers": { + "maxRetries": 1 + }, + "connectionDraining": { + "drainingTimeoutSec": 300 + }, + "description": "", + "healthChecks": [ + "projects/myproj/regions/us-central1/healthChecks/hc-1" + ], + "loadBalancingScheme": "EXTERNAL_MANAGED", + "localityLbPolicy": "RING_HASH", + "logConfig": { + "enable": false + }, + "name": "bs-2", + "outlierDetection": { + "baseEjectionTime": { + "nanos": 0, + "seconds": "30" + }, + "consecutiveErrors": 5, + "consecutiveGatewayFailure": 3, + "enforcingConsecutiveErrors": 0, + "enforcingConsecutiveGatewayFailure": 100, + "enforcingSuccessRate": 100, + "interval": { + "nanos": 0, + "seconds": "1" + }, + "maxEjectionPercent": 50, + "successRateMinimumHosts": 5, + "successRateRequestVolume": 100, + "successRateStdevFactor": 1900 + }, + "protocol": "TCP", + "region": "projects/myproj/regions/us-central1", + "sessionAffinity": "CLIENT_IP", + "timeoutSec": 30 + } + } + } +] \ No 
newline at end of file diff --git a/mmv1/third_party/cai2hcl/services/compute/testdata/compute_region_backend_service.tf b/mmv1/third_party/cai2hcl/services/compute/testdata/compute_region_backend_service.tf new file mode 100644 index 000000000000..700d3a5ae3ff --- /dev/null +++ b/mmv1/third_party/cai2hcl/services/compute/testdata/compute_region_backend_service.tf @@ -0,0 +1,75 @@ +resource "google_compute_region_backend_service" "bs-1" { + backend { + balancing_mode = "CONNECTION" + failover = false + group = "projects/myproj/zones/us-central1-a/instanceGroups/ig-1" + } + + connection_draining_timeout_sec = 30 + description = "bs-1 description" + health_checks = ["projects/myproj/global/healthChecks/hc-1"] + load_balancing_scheme = "INTERNAL" + + log_config { + enable = true + sample_rate = 0.2 + } + + name = "bs-1" + network = "projects/myproj/global/networks/default" + protocol = "TCP" + region = "us-central1" + session_affinity = "NONE" +} + +resource "google_compute_region_backend_service" "bs-2" { + backend { + balancing_mode = "CONNECTION" + capacity_scaler = 0.1 + group = "projects/myproj/zones/us-central1-c/networkEndpointGroups/neg-1" + max_connections = 2 + } + + circuit_breakers { + max_retries = 1 + } + + connection_draining_timeout_sec = 300 + health_checks = ["projects/myproj/regions/us-central1/healthChecks/hc-1"] + load_balancing_scheme = "EXTERNAL_MANAGED" + locality_lb_policy = "RING_HASH" + + log_config { + enable = false + } + + name = "bs-2" + + outlier_detection { + base_ejection_time { + nanos = 0 + seconds = 30 + } + + consecutive_errors = 5 + consecutive_gateway_failure = 3 + enforcing_consecutive_errors = 0 + enforcing_consecutive_gateway_failure = 100 + enforcing_success_rate = 100 + + interval { + nanos = 0 + seconds = 1 + } + + max_ejection_percent = 50 + success_rate_minimum_hosts = 5 + success_rate_request_volume = 100 + success_rate_stdev_factor = 1900 + } + + protocol = "TCP" + region = "us-central1" + session_affinity = 
"CLIENT_IP" + timeout_sec = 30 +} diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_per_package.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_per_package.kt index da9f65181fba..21d956030d68 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_per_package.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_per_package.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_sweepers.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_sweepers.kt index 493d8c51d8f8..5d31f45db4bf 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_sweepers.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_sweepers.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_vcr_recording.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_vcr_recording.kt index 347edb324cf5..d647e3294737 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_vcr_recording.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_configuration_vcr_recording.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_features.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_features.kt index 0922eca6a28c..50734428b55f 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_features.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_features.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_parameters.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_parameters.kt index dabe4fe5adcf..88928ed37a23 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_parameters.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_parameters.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_steps.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_steps.kt index db01380265db..4faed1046b65 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_steps.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_steps.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt index f56235a9f0a5..95127f85e439 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt b/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt index c723f5087025..4453c4fd73a8 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package builds diff --git a/mmv1/third_party/terraform/.teamcity/components/constants.kt b/mmv1/third_party/terraform/.teamcity/components/constants.kt index fbf3ded5d3f9..7ecf16baa052 100644 --- a/mmv1/third_party/terraform/.teamcity/components/constants.kt +++ b/mmv1/third_party/terraform/.teamcity/components/constants.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten // Provider name that matches the name in the Registry diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/packages.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/packages.kt index 4ff6379fb1be..19b107e2e72d 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/packages.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/packages.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package generated diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/per_service_parallelism.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/per_service_parallelism.kt index 8d1c9e43dd84..a57270c35250 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/per_service_parallelism.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/per_service_parallelism.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package generated diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index 663cebe9095b..27550dabce13 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package generated diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index cb7b661c36b6..51f98ed08a05 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package generated diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt b/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt index cbf92feae3da..3824dcbfa97d 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt b/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt index cc325e91b6b5..da23e7767c9e 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt b/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt index 535e808b5923..f8da47ef5290 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt b/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt index 1c11675a2d74..3b57d2fc12c4 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects.reused diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt b/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt index 971050ee9ad3..0c531f952e40 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects.reused diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/reused/vcr_recording.kt b/mmv1/third_party/terraform/.teamcity/components/projects/reused/vcr_recording.kt index 5fafdc14aa5e..c9916b27a722 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/reused/vcr_recording.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/reused/vcr_recording.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects.reused diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt b/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt index 2e26e7948ba9..c3ffd869756f 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package projects diff --git a/mmv1/third_party/terraform/.teamcity/components/unique_id.kt b/mmv1/third_party/terraform/.teamcity/components/unique_id.kt index 3843780c1632..00635a8f783c 100644 --- a/mmv1/third_party/terraform/.teamcity/components/unique_id.kt +++ b/mmv1/third_party/terraform/.teamcity/components/unique_id.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten fun replaceCharsId(id: String): String{ diff --git a/mmv1/third_party/terraform/.teamcity/components/vcs_roots/vcs_roots.kt b/mmv1/third_party/terraform/.teamcity/components/vcs_roots/vcs_roots.kt index cffbe34cbd3b..3ee70f25f1ef 100644 --- a/mmv1/third_party/terraform/.teamcity/components/vcs_roots/vcs_roots.kt +++ b/mmv1/third_party/terraform/.teamcity/components/vcs_roots/vcs_roots.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package vcs_roots diff --git a/mmv1/third_party/terraform/.teamcity/settings.kts b/mmv1/third_party/terraform/.teamcity/settings.kts index c0bbf1eccf85..7e748003d8f7 100644 --- a/mmv1/third_party/terraform/.teamcity/settings.kts +++ b/mmv1/third_party/terraform/.teamcity/settings.kts @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten import projects.googleCloudRootProject diff --git a/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt b/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt index c2c5c290a5dd..ffde72f5ae79 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package tests diff --git a/mmv1/third_party/terraform/.teamcity/tests/context_parameters.kt b/mmv1/third_party/terraform/.teamcity/tests/context_parameters.kt index 33b852fc8a00..c9f373786be8 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/context_parameters.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/context_parameters.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package tests diff --git a/mmv1/third_party/terraform/.teamcity/tests/nightly_tests_project.kt b/mmv1/third_party/terraform/.teamcity/tests/nightly_tests_project.kt index c61533da3113..57764788a5ac 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/nightly_tests_project.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/nightly_tests_project.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package tests diff --git a/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt b/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt index 18167227ab5b..27e55f737a1f 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package tests diff --git a/mmv1/third_party/terraform/.teamcity/tests/test_utils.kt b/mmv1/third_party/terraform/.teamcity/tests/test_utils.kt index bdc116b1dcc3..747384496441 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/test_utils.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/test_utils.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package tests diff --git a/mmv1/third_party/terraform/.teamcity/tests/vcs_roots.kt b/mmv1/third_party/terraform/.teamcity/tests/vcs_roots.kt index 957214fe90de..a46abc0a89af 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/vcs_roots.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/vcs_roots.kt @@ -1,3 +1,8 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + // This file is controlled by MMv1, any changes made here will be overwritten package tests diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go index 3a96a74f1e7c..4ea15246ddbd 100644 --- a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go +++ b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go @@ -30,7 +30,7 @@ func TestAccAlloydbCluster_secondaryClusterMandatoryFields(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", 
"restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -293,7 +293,7 @@ func TestAccAlloydbCluster_secondaryClusterUpdate(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterUpdate(context), @@ -302,7 +302,7 @@ func TestAccAlloydbCluster_secondaryClusterUpdate(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -377,7 +377,7 @@ func TestAccAlloydbCluster_secondaryClusterAddAutomatedBackupPolicy(t *testing.T ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, { // Invalid input check - can not add automated backup policy to a secondary 
cluster @@ -477,7 +477,7 @@ func TestAccAlloydbCluster_secondaryClusterUsingCMEK(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -570,7 +570,7 @@ func TestAccAlloydbCluster_secondaryClusterWithNetworkConfig(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -645,7 +645,7 @@ func TestAccAlloydbCluster_secondaryClusterWithNetworkConfigAndAllocatedIPRange( ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -726,7 +726,7 @@ func TestAccAlloydbCluster_secondaryClusterPromote(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", 
"restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -735,7 +735,7 @@ func TestAccAlloydbCluster_secondaryClusterPromote(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -873,7 +873,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndSimultaneousUpdate(t *testi ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteAndSimultaneousUpdate(context), @@ -882,7 +882,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndSimultaneousUpdate(t *testi ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: 
[]string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -965,7 +965,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(t *te ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -974,7 +974,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(t *te ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(context), @@ -983,7 +983,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(t *te ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -1046,7 +1046,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndUpdate(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -1055,7 +1055,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndUpdate(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteAndUpdate(context), @@ -1064,7 +1064,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndUpdate(t *testing.T) { ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, 
- ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -1316,7 +1316,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndAddAndDeleteAutomatedBackup ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -1325,7 +1325,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndAddAndDeleteAutomatedBackup ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteAndAddAutomatedBackupPolicyAndInitialUser(context), @@ -1334,7 +1334,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndAddAndDeleteAutomatedBackup ResourceName: 
"google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -1343,7 +1343,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndAddAndDeleteAutomatedBackup ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -1452,7 +1452,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteTimeBasedRetentionPol ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -1461,7 +1461,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteTimeBasedRetentionPol 
ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteWithTimeBasedRetentionPolicy(context), @@ -1470,7 +1470,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteTimeBasedRetentionPol ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteWithoutTimeBasedRetentionPolicy(context), @@ -1479,7 +1479,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteTimeBasedRetentionPol ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, }, }) @@ -1666,7 +1666,7 @@ func 
TestAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(t ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromote(context), @@ -1675,7 +1675,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(t ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, }, { Config: testAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(context), @@ -1684,7 +1684,7 @@ func TestAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(t ResourceName: "google_alloydb_cluster.secondary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels", "reconciling"}, 
}, }, }) diff --git a/mmv1/third_party/terraform/services/bigquery/iam_bigquery_dataset.go b/mmv1/third_party/terraform/services/bigquery/iam_bigquery_dataset.go index f6966ac4d2d2..740da92602ad 100644 --- a/mmv1/third_party/terraform/services/bigquery/iam_bigquery_dataset.go +++ b/mmv1/third_party/terraform/services/bigquery/iam_bigquery_dataset.go @@ -214,7 +214,6 @@ func iamMemberToAccess(member string) (string, string, error) { if strings.HasPrefix(member, "deleted:") { return "", "", fmt.Errorf("BigQuery Dataset IAM member is deleted: %s", member) } - pieces := strings.SplitN(member, ":", 2) if len(pieces) > 1 { switch pieces[0] { @@ -222,19 +221,19 @@ func iamMemberToAccess(member string) (string, string, error) { return "groupByEmail", pieces[1], nil case "domain": return "domain", pieces[1], nil + case "iamMember": + return "iamMember", pieces[1], nil case "user": return "userByEmail", pieces[1], nil case "serviceAccount": return "userByEmail", pieces[1], nil - default: - return "", "", fmt.Errorf("Failed to parse BigQuery Dataset IAM member type: %s", member) } } if member == "projectOwners" || member == "projectReaders" || member == "projectWriters" || member == "allAuthenticatedUsers" { // These are special BigQuery Dataset permissions return "specialGroup", member, nil } - return "iamMember", member, nil + return "", "", fmt.Errorf("Failed to parse BigQuery Dataset IAM member type: %s", member) } func accessToIamMember(access map[string]interface{}) (string, error) { @@ -249,7 +248,7 @@ func accessToIamMember(access map[string]interface{}) (string, error) { return member.(string), nil } if member, ok := access["iamMember"]; ok { - return member.(string), nil + return fmt.Sprintf("iamMember:%s", member.(string)), nil } if _, ok := access["view"]; ok { // view does not map to an IAM member, use access instead diff --git a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_iam_member_test.go 
b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_iam_member_test.go index d138457f702d..0ab43e371d9d 100644 --- a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_iam_member_test.go +++ b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_iam_member_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/envvar" ) -func TestAccBigqueryDatasetIamMember_basic(t *testing.T) { +func TestAccBigqueryDatasetIamMember_serviceAccount(t *testing.T) { t.Parallel() datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) @@ -25,27 +25,55 @@ func TestAccBigqueryDatasetIamMember_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccBigqueryDatasetIamMember_basic(datasetID, saID), + Config: testAccBigqueryDatasetIamMember_serviceAccount(datasetID, saID), Check: testAccCheckBigQueryDatasetAccessPresent(t, "google_bigquery_dataset.dataset", expected), }, { // Destroy step instead of CheckDestroy so we can check the access is removed without deleting the dataset - Config: testAccBigqueryDatasetIamMember_destroy(datasetID, "dataset"), + Config: testAccBigqueryDatasetIamMember_destroy(datasetID), Check: testAccCheckBigQueryDatasetAccessAbsent(t, "google_bigquery_dataset.dataset", expected), }, }, }) } -func testAccBigqueryDatasetIamMember_destroy(datasetID, rs string) string { +func TestAccBigqueryDatasetIamMember_iamMember(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + wifIDs := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + expected := map[string]interface{}{ + "role": "roles/viewer", + "iamMember": fmt.Sprintf("principal://iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/subject/test", envvar.GetTestProjectNumberFromEnv(), wifIDs), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccBigqueryDatasetIamMember_iamMember(datasetID, wifIDs), + Check: testAccCheckBigQueryDatasetAccessPresent(t, "google_bigquery_dataset.dataset", expected), + }, + { + // Destroy step instead of CheckDestroy so we can check the access is removed without deleting the dataset + Config: testAccBigqueryDatasetIamMember_destroy(datasetID), + Check: testAccCheckBigQueryDatasetAccessAbsent(t, "google_bigquery_dataset.dataset", expected), + }, + }, + }) +} + +func testAccBigqueryDatasetIamMember_destroy(datasetID string) string { return fmt.Sprintf(` -resource "google_bigquery_dataset" "%s" { +resource "google_bigquery_dataset" "dataset" { dataset_id = "%s" } -`, rs, datasetID) +`, datasetID) } -func testAccBigqueryDatasetIamMember_basic(datasetID, saID string) string { +func testAccBigqueryDatasetIamMember_serviceAccount(datasetID, saID string) string { return fmt.Sprintf(` resource "google_bigquery_dataset_iam_member" "access" { dataset_id = google_bigquery_dataset.dataset.dataset_id @@ -62,3 +90,32 @@ resource "google_service_account" "bqviewer" { } `, datasetID, saID) } + +func testAccBigqueryDatasetIamMember_iamMember(datasetID, wifIDs string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset_iam_member" "access" { + dataset_id = google_bigquery_dataset.dataset.dataset_id + role = "roles/viewer" + member = "iamMember:principal://iam.googleapis.com/${google_iam_workload_identity_pool.wif_pool.name}/subject/test" +} + +resource "google_bigquery_dataset" "dataset" { + dataset_id = "%s" +} + +resource "google_iam_workload_identity_pool" "wif_pool" { + workload_identity_pool_id = "%s" +} + +resource "google_iam_workload_identity_pool_provider" "wif_provider" { + workload_identity_pool_id = google_iam_workload_identity_pool.wif_pool.workload_identity_pool_id + workload_identity_pool_provider_id = "%s" + 
attribute_mapping = { + "google.subject" = "assertion.sub" + } + oidc { + issuer_uri = "https://issuer-uri.com" + } +} +`, datasetID, wifIDs, wifIDs) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_custom_target_type_test.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_custom_target_type_test.go new file mode 100644 index 000000000000..bbef81a81b4e --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_custom_target_type_test.go @@ -0,0 +1,86 @@ +package clouddeploy_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccClouddeployCustomTargetType_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployCustomTargetTypeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployCustomTargetType_basic(context), + }, + { + ResourceName: "google_clouddeploy_custom_target_type.custom-target-type", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccClouddeployCustomTargetType_update(context), + }, + { + ResourceName: "google_clouddeploy_custom_target_type.custom-target-type", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccClouddeployCustomTargetType_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = 
"us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + description = "My custom target type" + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} +`, context) +} + +func testAccClouddeployCustomTargetType_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = "us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + description = "My custom target type" + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + include_skaffold_modules { + configs = ["my-config"] + google_cloud_storage { + source = "gs://example-bucket/dir/configs/*" + path = "skaffold.yaml" + } + } + include_skaffold_modules { + configs = ["my-config2"] + git { + repo = "http://github.com/example/example-repo.git" + path = "configs/skaffold.yaml" + ref = "main" + } + } + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go index ac86bac73dbf..b6d3d77906e2 100644 --- a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go +++ b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go @@ -490,6 +490,11 @@ func ResourceCloudFunctionsFunction() *schema.Resource { Computed: true, Description: `Describes the current stage of a deployment.`, }, + "version_id": { + Type: schema.TypeString, + Computed: true, + Description: `The version identifier of the Cloud Function. 
Each deployment attempt results in a new version of a function being created.`, + }, }, UseJSONNumber: true, } @@ -786,6 +791,9 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("project", cloudFuncId.Project); err != nil { return fmt.Errorf("Error setting project: %s", err) } + if err := d.Set("version_id", strconv.FormatInt(function.VersionId, 10)); err != nil { + return fmt.Errorf("Error setting version_id: %s", err) + } return nil } diff --git a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb index 0c2be3a3a1a3..61a267c0fcd8 100644 --- a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb @@ -71,6 +71,8 @@ func TestAccCloudFunctionsFunction_basic(t *testing.T) { "entry_point", "helloGET"), resource.TestCheckResourceAttr(funcResourceName, "trigger_http", "true"), + resource.TestCheckResourceAttr(funcResourceName, + "version_id", "1"), testAccCloudFunctionsFunctionHasLabel("my-label", "my-label-value", &function), testAccCloudFunctionsFunctionHasEnvironmentVariable("TEST_ENV_VARIABLE", "test-env-variable-value", &function), @@ -110,6 +112,8 @@ func TestAccCloudFunctionsFunction_update(t *testing.T) { t, funcResourceName, &function), resource.TestCheckResourceAttr(funcResourceName, "available_memory_mb", "128"), + resource.TestCheckResourceAttr(funcResourceName, + "version_id", "1"), testAccCloudFunctionsFunctionHasLabel("my-label", "my-label-value", &function), ), }, @@ -138,6 +142,8 @@ func TestAccCloudFunctionsFunction_update(t *testing.T) { "min_instances", "5"), resource.TestCheckResourceAttr(funcResourceName, "ingress_settings", "ALLOW_ALL"), + resource.TestCheckResourceAttr(funcResourceName, + "version_id", "2"), 
testAccCloudFunctionsFunctionHasLabel("my-label", "my-updated-label-value", &function), testAccCloudFunctionsFunctionHasLabel("a-new-label", "a-new-label-value", &function), testAccCloudFunctionsFunctionHasEnvironmentVariable("TEST_ENV_VARIABLE", diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 986d82e2dc5a..1a6ad62d42cf 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -251,8 +251,7 @@ func ResourceComputeInstance() *schema.Resource { AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, - ValidateFunc: validation.IntBetween(10000, 120000), - Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000.`, + Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle.`, }, "provisioned_throughput": { @@ -261,8 +260,7 @@ func ResourceComputeInstance() *schema.Resource { AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, - ValidateFunc: validation.IntBetween(1, 7124), - Description: `Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.`, + Description: `Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle.`, }, "enable_confidential_compute": { @@ -446,7 +444,7 @@ func ResourceComputeInstance() *schema.Resource { "ipv6_access_config": { Type: schema.TypeList, - Optional: true, + Optional: true, Description: `An array of IPv6 access configurations for this interface. 
Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -2069,6 +2067,23 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } } + if !updateDuringStop && d.HasChange(prefix+".stack_type") { + + networkInterfacePatchObj := &compute.NetworkInterface{ + StackType: d.Get(prefix+".stack_type").(string), + Fingerprint: instNetworkInterface.Fingerprint, + } + updateCall := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, networkInterfacePatchObj).Do + op, err := updateCall() + if err != nil { + return errwrap.Wrapf("Error updating network interface: {{err}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + if !updateDuringStop && d.HasChange(prefix+".ipv6_address") { networkInterfacePatchObj := &compute.NetworkInterface{ diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb index 3dc61dfc1d17..d61f2457be99 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb @@ -904,17 +904,10 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } <% end -%> - <% unless version == "ga" -%> if d.HasChange("stateful_internal_ip") || d.HasChange("stateful_external_ip") || d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d) change = true } - <% else -%> - if d.HasChange("stateful_disk") { - updatedManager.StatefulPolicy = 
expandStatefulPolicy(d) - change = true - } - <% end -%> if d.HasChange("list_managed_instances_results") { updatedManager.ListManagedInstancesResults = d.Get("list_managed_instances_results").(string) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index fda847e7e891..56aec134f0ae 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -628,7 +628,7 @@ Google Cloud KMS.`, "resource_manager_tags": { Type: schema.TypeMap, Optional: true, - ForceNew: false, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A map of resource manager tags. diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index c86796109098..bbed47e387b2 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -3279,6 +3279,33 @@ func TestAccComputeInstance_NetworkAttachmentUpdate(t *testing.T) { } <% end %> +func TestAccComputeInstance_NicStackTypeUpdate(t *testing.T) { + t.Parallel() + suffix := acctest.RandString(t, 10) + envRegion := envvar.GetTestRegionFromEnv() + instanceName := fmt.Sprintf("tf-test-compute-instance-%s", suffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nicStackTypeUpdate(suffix, envRegion, "IPV4_ONLY", instanceName), + }, + 
computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_nicStackTypeUpdate(suffix, envRegion, "IPV4_IPV6", instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_nicStackTypeUpdate(suffix, envRegion, "IPV4_ONLY", instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + func testAccCheckComputeInstanceDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -8964,3 +8991,50 @@ resource "google_compute_instance" "foobar" { } <% end %> +func testAccComputeInstance_nicStackTypeUpdate(suffix, region, stack_type, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + enable_ula_internal_ipv6 = true + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "%s" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "INTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "%s-a" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "%s" + } + + metadata = { + foo = "bar" + } +} +`, suffix, region, suffix, instance, region, stack_type) +} diff --git 
a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb index 871dd0314275..8530b1e3e6b5 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb @@ -847,17 +847,10 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met change = true } - <% unless version == "ga" -%> if d.HasChange("stateful_internal_ip") || d.HasChange("stateful_external_ip") || d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d) change = true } - <% else -%> - if d.HasChange("stateful_disk") { - updatedManager.StatefulPolicy = expandStatefulPolicy(d) - change = true - } - <% end -%> <% unless version == "ga" -%> if d.HasChange("all_instances_config") { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb index 85416a6ebdcb..a5470d09b032 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb @@ -323,8 +323,6 @@ func TestAccRegionInstanceGroupManager_distributionPolicy(t *testing.T) { } func TestAccRegionInstanceGroupManager_stateful(t *testing.T) { - // TODO: Flaky test due to ordering of IPs https://github.com/hashicorp/terraform-provider-google/issues/13430 - t.Skip() t.Parallel() template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb 
b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index cd6a01f4b7bf..02d0298ffd57 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -590,7 +590,7 @@ Google Cloud KMS.`, "resource_manager_tags": { Type: schema.TypeMap, Optional: true, - ForceNew: false, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A map of resource manager tags. diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb index 59db357b97a6..2222dd74ab0d 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb @@ -5,9 +5,7 @@ import ( "context" "fmt" "log" -<% unless version == 'ga' -%> "strings" -<% end -%> "time" @@ -466,14 +464,12 @@ func ResourceComputeSecurityPolicy() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"NORMAL", "VERBOSE"}, false), Description: `Logging level. 
Supported values include: "NORMAL", "VERBOSE".`, }, - <% unless version == 'ga' -%> "user_ip_request_headers": { Type: schema.TypeSet, Optional: true, Description: `An optional list of case-insensitive request header names to use for resolving the callers client IP address.`, Elem: &schema.Schema{Type: schema.TypeString}, }, - <% end -%> }, }, }, @@ -742,9 +738,7 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ Fingerprint: d.Get("fingerprint").(string), } - <% unless version == 'ga' -%> updateMask := []string{} - <% end -%> if d.HasChange("type") { securityPolicy.Type = d.Get("type").(string) @@ -759,13 +753,11 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ if d.HasChange("advanced_options_config") { securityPolicy.AdvancedOptionsConfig = expandSecurityPolicyAdvancedOptionsConfig(d.Get("advanced_options_config").([]interface{})) securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdvancedOptionsConfig", "advancedOptionsConfig.jsonParsing", "advancedOptionsConfig.jsonCustomConfig", "advancedOptionsConfig.logLevel") - <% unless version == 'ga' -%> securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "advanceOptionConfig.userIpRequestHeaders") if len(securityPolicy.AdvancedOptionsConfig.UserIpRequestHeaders) == 0 { // to clean this list we must send the updateMask of this field on the request. 
updateMask = append(updateMask, "advanced_options_config.user_ip_request_headers") } - <% end -%> } if d.HasChange("adaptive_protection_config") { @@ -784,11 +776,7 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ if len(securityPolicy.ForceSendFields) > 0 { client := config.NewComputeClient(userAgent) - <% if version == 'ga' -%> - op, err := client.SecurityPolicies.Patch(project, sp, securityPolicy).Do() - <% else -%> op, err := client.SecurityPolicies.Patch(project, sp, securityPolicy).UpdateMask(strings.Join(updateMask, ",")).Do() - <% end -%> if err != nil { return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) @@ -1230,9 +1218,7 @@ func expandSecurityPolicyAdvancedOptionsConfig(configured []interface{}) *comput JsonParsing: data["json_parsing"].(string), JsonCustomConfig: expandSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(data["json_custom_config"].([]interface{})), LogLevel: data["log_level"].(string), - <% unless version == 'ga' -%> UserIpRequestHeaders: tpgresource.ConvertStringArr(data["user_ip_request_headers"].(*schema.Set).List()), - <% end %> } } @@ -1245,9 +1231,7 @@ func flattenSecurityPolicyAdvancedOptionsConfig(conf *compute.SecurityPolicyAdva "json_parsing": conf.JsonParsing, "json_custom_config": flattenSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(conf.JsonCustomConfig), "log_level": conf.LogLevel, - <% unless version == 'ga' -%> "user_ip_request_headers": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(conf.UserIpRequestHeaders)), - <% end -%> } return []map[string]interface{}{data} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb index b3dcaac75f47..310985e5536e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb +++ 
b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb @@ -189,7 +189,6 @@ func TestAccComputeSecurityPolicy_withAdvancedOptionsConfig(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - <% unless version == 'ga' -%> { Config: testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update(spName), }, @@ -216,7 +215,6 @@ func TestAccComputeSecurityPolicy_withAdvancedOptionsConfig(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - <% end -%> { Config: testAccComputeSecurityPolicy_basic(spName), }, @@ -1108,19 +1106,15 @@ resource "google_compute_security_policy" "policy" { ] } log_level = "VERBOSE" - <% unless version == 'ga' -%> user_ip_request_headers = [ "True-Client-IP", "x-custom-ip" ] - <% end -%> - } } `, spName) } -<% unless version == 'ga' -%> func testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update(spName string) string { return fmt.Sprintf(` resource "google_compute_security_policy" "policy" { @@ -1189,7 +1183,6 @@ resource "google_compute_security_policy" "policy" { } `, spName) } -<% end -%> func testAccComputeSecurityPolicy_withAdaptiveProtection(spName string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index dff096a832ce..00a3f3aaab84 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -1198,7 +1198,7 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - MaxItems: 2, + MaxItems: 1, Description: `Configuration of Advanced Datapath Observability features.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -1207,12 +1207,21 @@ func ResourceContainerCluster() *schema.Resource { Required: true, Description: `Whether or not the 
advanced datapath metrics are enabled.`, }, + "enable_relay": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not Relay is enabled.`, + Default: false, + ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.relay_mode"}, + }, "relay_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `Mode used to make Relay available.`, - ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field.", + Description: `Mode used to make Relay available.`, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), + ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.enable_relay"}, }, }, }, @@ -2061,16 +2070,26 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, Description: `The Fleet host project of the cluster.`, }, - "membership": { - Type: schema.TypeString, - Computed: true, - Description: `Full resource name of the registered fleet membership of the cluster.`, - }, - "pre_registered": { - Type: schema.TypeBool, - Computed: true, - Description: `Whether the cluster has been registered via the fleet API.`, - }, + "membership": { + Type: schema.TypeString, + Computed: true, + Description: `Full resource name of the registered fleet membership of the cluster.`, + }, + "pre_registered": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the cluster has been registered via the fleet API.`, + }, + "membership_id": { + Type: schema.TypeString, + Computed: true, + Description: `Short name of the fleet membership, for example "member-1".`, + }, + "membership_location": { + Type: 
schema.TypeString, + Computed: true, + Description: `Location of the fleet membership, for example "us-central1".`, + }, }, }, }, @@ -5317,7 +5336,18 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig mc.AdvancedDatapathObservabilityConfig = &container.AdvancedDatapathObservabilityConfig{ EnableMetrics: advanced_datapath_observability_config["enable_metrics"].(bool), - RelayMode: advanced_datapath_observability_config["relay_mode"].(string), + } + + enable_relay := advanced_datapath_observability_config["enable_relay"].(bool) + relay_mode := advanced_datapath_observability_config["relay_mode"].(string) + if enable_relay { + mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay + } else if relay_mode == "INTERNAL_VPC_LB" || relay_mode == "EXTERNAL_LB" { + mc.AdvancedDatapathObservabilityConfig.RelayMode = relay_mode + } else { + mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay + mc.AdvancedDatapathObservabilityConfig.RelayMode = "DISABLED" + mc.AdvancedDatapathObservabilityConfig.ForceSendFields = []string{"EnableRelay"} } } @@ -6090,16 +6120,27 @@ func flattenGatewayApiConfig(c *container.GatewayAPIConfig) []map[string]interfa } func flattenFleet(c *container.Fleet) []map[string]interface{} { - if c == nil { - return nil - } - return []map[string]interface{}{ - { - "project": c.Project, - "membership": c.Membership, - "pre_registered": c.PreRegistered, - }, - } + if c == nil { + return nil + } + + // Parse membership_id and membership_location from full membership name. 
+ var membership_id, membership_location string + membershipRE := regexp.MustCompile(`^(//[a-zA-Z0-9\.\-]+)?/?projects/([^/]+)/locations/([a-zA-Z0-9\-]+)/memberships/([^/]+)$`) + if match := membershipRE.FindStringSubmatch(c.Membership); match != nil { + membership_id = match[4] + membership_location = match[3] + } + + return []map[string]interface{}{ + { + "project": c.Project, + "membership": c.Membership, + "membership_id": membership_id, + "membership_location": membership_location, + "pre_registered": c.PreRegistered, + }, + } } func flattenEnableK8sBetaApis(c *container.K8sBetaAPIConfig) []map[string]interface{} { @@ -6145,10 +6186,33 @@ func flattenMonitoringConfig(c *container.MonitoringConfig) []map[string]interfa } func flattenAdvancedDatapathObservabilityConfig(c *container.AdvancedDatapathObservabilityConfig) []map[string]interface{} { + if c == nil { + return nil + } + + if c.EnableRelay { + return []map[string]interface{}{ + { + "enable_metrics": c.EnableMetrics, + "enable_relay": c.EnableRelay, + }, + } + } + + if c.RelayMode == "INTERNAL_VPC_LB" || c.RelayMode == "EXTERNAL_LB" { + return []map[string]interface{}{ + { + "enable_metrics": c.EnableMetrics, + "relay_mode": c.RelayMode, + }, + } + } + return []map[string]interface{}{ { "enable_metrics": c.EnableMetrics, - "relay_mode": c.RelayMode, + "enable_relay": false, + "relay_mode": "DISABLED", }, } } diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb index 2bce542a3925..350451a0e97b 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb @@ -996,6 +996,11 @@ func resourceContainerClusterResourceV1() *schema.Resource { Required: true, Description: `Whether or not the advanced datapath metrics are enabled.`, }, + 
"enable_relay": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not Relay is enabled.`, + }, "relay_mode": { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index ff7e8655a6a1..d29ab75db60b 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -3162,6 +3162,24 @@ func TestAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityCo ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, + { + Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, }, }) } @@ -8886,6 +8904,56 @@ resource "google_compute_subnetwork" "container_subnetwork" { } } +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = 
google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + monitoring_config { + enable_components = [] + advanced_datapath_observability_config { + enable_metrics = true + enable_relay = true + } + } + deletion_protection = false +} +`, name, name) +} + +func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range = "192.168.64.0/22" + } +} + resource "google_container_cluster" "primary" { name = "%s" location = "us-central1-a" @@ -8936,6 +9004,56 @@ resource "google_compute_subnetwork" "container_subnetwork" { } } +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + monitoring_config { + enable_components = [] + advanced_datapath_observability_config { + enable_metrics = false + enable_relay = false + } + } + deletion_protection = false +} +`, name, name) +} + +func 
testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range = "192.168.64.0/22" + } +} + resource "google_container_cluster" "primary" { name = "%s" location = "us-central1-a" diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_app_attest_config_test.go b/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_app_attest_config_test.go new file mode 100644 index 000000000000..a1b05b852feb --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_app_attest_config_test.go @@ -0,0 +1,59 @@ +package firebaseappcheck_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "team_id": "9987654321", + "random_suffix": acctest.RandString(t, 10), + "token_ttl": "7200s", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: 
map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_app_attest_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigFullExample(context), + }, + { + ResourceName: "google_firebase_app_check_app_attest_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_app_attest_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + }, + }) +} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_play_integrity_config_test.go b/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_play_integrity_config_test.go new file mode 100644 index 000000000000..c97df9fd1fab --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_play_integrity_config_test.go @@ -0,0 +1,58 @@ +package firebaseappcheck_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "token_ttl": "7200s", + } + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_play_integrity_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigFullExample(context), + }, + { + ResourceName: "google_firebase_app_check_play_integrity_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_play_integrity_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + }, + }) +} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_service_config_test.go b/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_service_config_test.go index 2ea26c572329..aba5a807434f 100644 --- a/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_service_config_test.go +++ b/mmv1/third_party/terraform/services/firebaseappcheck/resource_firebase_app_check_service_config_test.go @@ -14,7 +14,7 @@ func TestAccFirebaseAppCheckServiceConfig_firebaseAppCheckServiceConfigUpdate(t context := map[string]interface{}{ "project_id": envvar.GetTestProjectFromEnv(), - "service_id": "firestore.googleapis.com", + "service_id": "identitytoolkit.googleapis.com", "random_suffix": acctest.RandString(t, 10), } diff --git 
a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_membership_binding_test.go b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_membership_binding_test.go index e70ad26785a7..92cd069eb444 100644 --- a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_membership_binding_test.go +++ b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_membership_binding_test.go @@ -49,7 +49,7 @@ func TestAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_update( func testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_container_cluster" "primary" { - name = "basiccluster%{random_suffix}" + name = "tf-test-basic-cluster%{random_suffix}" location = "us-central1-a" initial_node_count = 1 deletion_protection = false @@ -93,7 +93,7 @@ resource "google_gke_hub_membership_binding" "example" { func testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_container_cluster" "primary" { - name = "basiccluster%{random_suffix}" + name = "tf-test-basic-cluster%{random_suffix}" location = "us-central1-a" initial_node_count = 1 deletion_protection = false diff --git a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_hl7_v2_store_test.go.erb b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_hl7_v2_store_test.go.erb index aec48d059681..4491a090fb29 100644 --- a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_hl7_v2_store_test.go.erb +++ b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_hl7_v2_store_test.go.erb @@ -162,6 +162,7 @@ func testGoogleHealthcareHl7V2Store_basic(hl7_v2StoreName, datasetName string) s resource "google_healthcare_hl7_v2_store" "default" { name = "%s" dataset = google_healthcare_dataset.dataset.id + reject_duplicate_message = true } resource 
"google_healthcare_dataset" "dataset" { diff --git a/mmv1/third_party/terraform/services/migrationcenter/resource_migration_center_preference_set_test.go b/mmv1/third_party/terraform/services/migrationcenter/resource_migration_center_preference_set_test.go new file mode 100644 index 000000000000..739265f52527 --- /dev/null +++ b/mmv1/third_party/terraform/services/migrationcenter/resource_migration_center_preference_set_test.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package migrationcenter_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccMigrationCenterPreferenceSet_preferenceSetUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckMigrationCenterPreferenceSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccMigrationCenterPreferenceSet_preferenceSetStart(context), + }, + { + ResourceName: "google_migration_center_preference_set.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "preference_set_id"}, + }, + }, + }) +} + +func 
testAccMigrationCenterPreferenceSet_preferenceSetStart(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_migration_center_preference_set" "default" { + location = "us-central1" + preference_set_id = "tf-test-preference-set-test%{random_suffix}" + description = "Terraform integration test description" + display_name = "Terraform integration test display" + virtual_machine_preferences { + vmware_engine_preferences { + cpu_overcommit_ratio = 1.5 + memory_overcommit_ratio = 2.0 + } + sizing_optimization_strategy = "SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE" + } +} +`, context) +} + +func testAccMigrationCenterPreferenceSet_preferenceSetUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_migration_center_preference_set" "default" { + location = "us-central1" + preference_set_id = "tf-test-preference-set-test%{random_suffix}" + description = "Terraform integration test updated description" + display_name = "Terraform integration test updated display" + virtual_machine_preferences { + vmware_engine_preferences { + cpu_overcommit_ratio = 1.4 + } + sizing_optimization_strategy = "SIZING_OPTIMIZATION_STRATEGY_MODERATE" + commitment_plan = "COMMITMENT_PLAN_ONE_YEAR" + preferred_regions = ["us-central1"] + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go index 386e29eb7f42..81574ec62d00 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go @@ -1,20 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package netapp_test import ( @@ -25,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccNetappVolume_volumeBasicExample_update(t *testing.T) { +func TestAccNetappVolume_netappVolumeBasicExample_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -45,7 +31,7 @@ func TestAccNetappVolume_volumeBasicExample_update(t *testing.T) { ResourceName: "google_netapp_volume.test_volume", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "deletion_policy"}, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, }, { Config: testAccNetappVolume_volumeBasicExample_full(context), }, @@ -53,7 +39,7 @@ func TestAccNetappVolume_volumeBasicExample_update(t *testing.T) { ResourceName: "google_netapp_volume.test_volume", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "deletion_policy"}, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, }, { Config: testAccNetappVolume_volumeBasicExample_update(context), @@ -62,7 +48,7 @@ func TestAccNetappVolume_volumeBasicExample_update(t *testing.T) { ResourceName: "google_netapp_volume.test_volume", ImportState: true, 
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "deletion_policy"}, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, }, { Config: testAccNetappVolume_volumeBasicExample_updatesnapshot(context), @@ -71,7 +57,16 @@ func TestAccNetappVolume_volumeBasicExample_update(t *testing.T) { ResourceName: "google_netapp_volume.test_volume", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "deletion_policy"}, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappVolume_volumeBasicExample_createclonevolume(context), + }, + { + ResourceName: "google_netapp_volume.test_volume_clone", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, }, }, }) @@ -80,24 +75,24 @@ func TestAccNetappVolume_volumeBasicExample_update(t *testing.T) { func testAccNetappVolume_volumeBasicExample_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "default" { - name = "tf-test-test-pool%{random_suffix}" - location = "us-west2" - service_level = "PREMIUM" - capacity_gib = "2048" - network = data.google_compute_network.default.id + name = "tf-test-test-pool%{random_suffix}" + location = "us-west2" + service_level = "PREMIUM" + capacity_gib = "2048" + network = data.google_compute_network.default.id } resource "google_netapp_volume" "test_volume" { - location = "us-west2" - name = "tf-test-test-volume%{random_suffix}" - capacity_gib = "100" - share_name = "tf-test-test-volume%{random_suffix}" - storage_pool = google_netapp_storage_pool.default.name - protocols = ["NFSV3"] + location = "us-west2" + name = 
"tf-test-test-volume%{random_suffix}" + capacity_gib = "100" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default.name + protocols = ["NFSV3"] } data "google_compute_network" "default" { - name = "%{network_name}" + name = "%{network_name}" } `, context) } @@ -105,237 +100,298 @@ data "google_compute_network" "default" { func testAccNetappVolume_volumeBasicExample_full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "default" { - name = "tf-test-test-pool%{random_suffix}" - location = "us-west2" - service_level = "PREMIUM" - capacity_gib = "2048" - network = data.google_compute_network.default.id + name = "tf-test-test-pool%{random_suffix}" + location = "us-west2" + service_level = "PREMIUM" + capacity_gib = "2048" + network = data.google_compute_network.default.id } - + resource "google_netapp_storage_pool" "default2" { - name = "tf-test-pool%{random_suffix}" - location = "us-west2" - service_level = "EXTREME" - capacity_gib = "2048" - network = data.google_compute_network.default.id + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id } - + resource "google_netapp_volume" "test_volume" { - location = "us-west2" - name = "tf-test-test-volume%{random_suffix}" - capacity_gib = "100" - share_name = "tf-test-test-volume%{random_suffix}" - storage_pool = google_netapp_storage_pool.default.name - protocols = ["NFSV3"] - smb_settings = [] - unix_permissions = "0770" - labels = { - key= "test" - value= "pool" - } - description = "This is a test description" - snapshot_directory = false - security_style = "UNIX" - kerberos_enabled = false - export_policy { - rules { - access_type = "READ_ONLY" - allowed_clients = "0.0.0.0/0" - has_root_access = "false" - kerberos5_read_only = false - kerberos5_read_write = false - kerberos5i_read_only = false - 
kerberos5i_read_write = false - kerberos5p_read_only = false - kerberos5p_read_write = false - nfsv3 = true - nfsv4 = false - } - rules { - access_type = "READ_WRITE" - allowed_clients = "10.2.3.4,10.2.3.5" - has_root_access = "true" - kerberos5_read_only = false - kerberos5_read_write = false - kerberos5i_read_only = false - kerberos5i_read_write = false - kerberos5p_read_only = false - kerberos5p_read_write = false - nfsv3 = true - nfsv4 = false - } - } - restricted_actions = [] - snapshot_policy { - daily_schedule { - snapshots_to_keep = 2 - } - enabled = true - hourly_schedule { - snapshots_to_keep = 2 - } - monthly_schedule { - snapshots_to_keep = 4 - } - weekly_schedule { - snapshots_to_keep = 2 - } - } + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "100" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default.name + protocols = ["NFSV3"] + smb_settings = [] + unix_permissions = "0770" + labels = { + key= "test" + value= "pool" + } + description = "This is a test description" + snapshot_directory = false + security_style = "UNIX" + kerberos_enabled = false + export_policy { + rules { + access_type = "READ_ONLY" + allowed_clients = "0.0.0.0/0" + has_root_access = "false" + kerberos5_read_only = false + kerberos5_read_write = false + kerberos5i_read_only = false + kerberos5i_read_write = false + kerberos5p_read_only = false + kerberos5p_read_write = false + nfsv3 = true + nfsv4 = false + } + rules { + access_type = "READ_WRITE" + allowed_clients = "10.2.3.4,10.2.3.5" + has_root_access = "true" + kerberos5_read_only = false + kerberos5_read_write = false + kerberos5i_read_only = false + kerberos5i_read_write = false + kerberos5p_read_only = false + kerberos5p_read_write = false + nfsv3 = true + nfsv4 = false + } + } + restricted_actions = [] + snapshot_policy { + daily_schedule { + snapshots_to_keep = 2 + } + enabled = true + hourly_schedule { + snapshots_to_keep = 2 + } + 
monthly_schedule { + snapshots_to_keep = 4 + } + weekly_schedule { + snapshots_to_keep = 2 + } + } } data "google_compute_network" "default" { - name = "%{network_name}" + name = "%{network_name}" } `, context) } func testAccNetappVolume_volumeBasicExample_update(context map[string]interface{}) string { return acctest.Nprintf(` - resource "google_netapp_storage_pool" "default" { - name = "tf-test-test-pool%{random_suffix}" - location = "us-west2" - service_level = "PREMIUM" - capacity_gib = "2048" - network = data.google_compute_network.default.id + name = "tf-test-test-pool%{random_suffix}" + location = "us-west2" + service_level = "PREMIUM" + capacity_gib = "2048" + network = data.google_compute_network.default.id } - + resource "google_netapp_storage_pool" "default2" { - name = "tf-test-pool%{random_suffix}" - location = "us-west2" - service_level = "EXTREME" - capacity_gib = "2048" - network = data.google_compute_network.default.id + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id } resource "google_netapp_volume" "test_volume" { - location = "us-west2" - name = "tf-test-test-volume%{random_suffix}" - capacity_gib = "200" - share_name = "tf-test-test-volume%{random_suffix}" - storage_pool = google_netapp_storage_pool.default2.name - protocols = ["NFSV3"] - smb_settings = [] - unix_permissions = "0740" - labels = {} - description = "" - snapshot_directory = true - security_style = "UNIX" - kerberos_enabled = false - export_policy { - rules { - access_type = "READ_WRITE" - allowed_clients = "0.0.0.0/0" - has_root_access = "true" - kerberos5_read_only = false - kerberos5_read_write = false - kerberos5i_read_only = false - kerberos5i_read_write = false - kerberos5p_read_only = false - kerberos5p_read_write = false - nfsv3 = true - nfsv4 = false - } - } - restricted_actions = ["DELETE"] - snapshot_policy { - enabled = true - daily_schedule { - hour = 
1 - minute = 2 - snapshots_to_keep = 1 - } - hourly_schedule { - minute = 10 - snapshots_to_keep = 1 - } - monthly_schedule { - days_of_month = "2" - hour = 3 - minute = 4 - snapshots_to_keep = 1 - } - weekly_schedule { - day = "Monday" - hour = 5 - minute = 6 - snapshots_to_keep = 1 - } - } + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + smb_settings = [] + unix_permissions = "0740" + labels = {} + description = "" + snapshot_directory = true + security_style = "UNIX" + kerberos_enabled = false + export_policy { + rules { + access_type = "READ_WRITE" + allowed_clients = "0.0.0.0/0" + has_root_access = "true" + kerberos5_read_only = false + kerberos5_read_write = false + kerberos5i_read_only = false + kerberos5i_read_write = false + kerberos5p_read_only = false + kerberos5p_read_write = false + nfsv3 = true + nfsv4 = false + } + } + # Delete protection only gets active after an NFS client mounts. + # Setting it here is save, volume can still be deleted. 
+ deletion_policy = "FORCE" + snapshot_policy { + enabled = true + daily_schedule { + hour = 1 + minute = 2 + snapshots_to_keep = 1 + } + hourly_schedule { + minute = 10 + snapshots_to_keep = 1 + } + monthly_schedule { + days_of_month = "2" + hour = 3 + minute = 4 + snapshots_to_keep = 1 + } + weekly_schedule { + day = "Monday" + hour = 5 + minute = 6 + snapshots_to_keep = 1 + } + } } data "google_compute_network" "default" { - name = "%{network_name}" + name = "%{network_name}" } `, context) } func testAccNetappVolume_volumeBasicExample_updatesnapshot(context map[string]interface{}) string { return acctest.Nprintf(` +resource "google_netapp_storage_pool" "default2" { + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "test_volume" { + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + smb_settings = [] + unix_permissions = "0740" + labels = {} + description = "" + snapshot_directory = true + security_style = "UNIX" + kerberos_enabled = false + export_policy { + rules { + access_type = "READ_WRITE" + allowed_clients = "0.0.0.0/0" + has_root_access = "true" + kerberos5_read_only = false + kerberos5_read_write = false + kerberos5i_read_only = false + kerberos5i_read_write = false + kerberos5p_read_only = false + kerberos5p_read_write = false + nfsv3 = true + nfsv4 = false + } + } + # Delete protection only gets active after an NFS client mounts. + # Setting it here is save, volume can still be deleted. 
+ restricted_actions = ["DELETE"] + deletion_policy = "FORCE" +} -resource "google_netapp_storage_pool" "default" { - name = "tf-test-test-pool%{random_suffix}" - location = "us-west2" - service_level = "PREMIUM" - capacity_gib = "2048" - network = data.google_compute_network.default.id +resource "google_netapp_volume_snapshot" "test-snapshot" { + depends_on = [google_netapp_volume.test_volume] + location = google_netapp_volume.test_volume.location + volume_name = google_netapp_volume.test_volume.name + name = "test-snapshot" +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + `, context) } - + +// Tests creating a new volume (clone) from a snapshot created from existing volume +func testAccNetappVolume_volumeBasicExample_createclonevolume(context map[string]interface{}) string { + return acctest.Nprintf(` resource "google_netapp_storage_pool" "default2" { - name = "tf-test-pool%{random_suffix}" - location = "us-west2" - service_level = "EXTREME" - capacity_gib = "2048" - network = data.google_compute_network.default.id + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id } - + resource "google_netapp_volume" "test_volume" { - location = "us-west2" - name = "tf-test-test-volume%{random_suffix}" - capacity_gib = "200" - share_name = "tf-test-test-volume%{random_suffix}" - storage_pool = google_netapp_storage_pool.default2.name - protocols = ["NFSV3"] - smb_settings = [] - unix_permissions = "0740" - labels = {} - description = "" - snapshot_directory = true - security_style = "UNIX" - kerberos_enabled = false - export_policy { - rules { - access_type = "READ_WRITE" - allowed_clients = "0.0.0.0/0" - has_root_access = "true" - kerberos5_read_only = false - kerberos5_read_write = false - kerberos5i_read_only = false - kerberos5i_read_write = false - kerberos5p_read_only = false - kerberos5p_read_write = false - nfsv3 = true - 
nfsv4 = false - } - } - # Delete protection only gets active after an NFS client mounts. - # Setting it here is save, volume can still be deleted. - restricted_actions = ["DELETE"] - deletion_policy = "FORCE" + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + smb_settings = [] + unix_permissions = "0740" + labels = {} + description = "" + snapshot_directory = true + security_style = "UNIX" + kerberos_enabled = false + export_policy { + rules { + access_type = "READ_WRITE" + allowed_clients = "0.0.0.0/0" + has_root_access = "true" + kerberos5_read_only = false + kerberos5_read_write = false + kerberos5i_read_only = false + kerberos5i_read_write = false + kerberos5p_read_only = false + kerberos5p_read_write = false + nfsv3 = true + nfsv4 = false + } + } + # Delete protection only gets active after an NFS client mounts. + # Setting it here is save, volume can still be deleted. + restricted_actions = ["DELETE"] + deletion_policy = "FORCE" } -# Add the following snapshot block to the test as soon as snapshot resoruce -# is added to the provider. It will make the test cleanup require -# deletion_policy = "FORCE" on the volume for successful delete. 
-# resource "google_netapp_volumesnapshot" "test-snapshot" { -# depends_on = [google_netapp_volume.test_volume] -# location = google_netapp_volume.test_volume.location -# volume_name = google_netapp_volume.test_volume.name -# name = "test-snapshot%{random_suffix}" -# } +resource "google_netapp_volume_snapshot" "test-snapshot" { + depends_on = [google_netapp_volume.test_volume] + location = google_netapp_volume.test_volume.location + volume_name = google_netapp_volume.test_volume.name + name = "test-snapshot" +} + +resource "google_netapp_volume" "test_volume_clone" { + location = "us-west2" + name = "tf-test-test-volume-clone%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume-clone%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + deletion_policy = "FORCE" + restore_parameters { + source_snapshot = google_netapp_volume_snapshot.test-snapshot.id + } +} data "google_compute_network" "default" { - name = "%{network_name}" + name = "%{network_name}" } `, context) } diff --git a/mmv1/third_party/terraform/services/networksecurity/resource_network_security_security_profile_group_test.go.erb b/mmv1/third_party/terraform/services/networksecurity/resource_network_security_security_profile_group_test.go.erb new file mode 100644 index 000000000000..8e56deb5351d --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/resource_network_security_security_profile_group_test.go.erb @@ -0,0 +1,105 @@ +<% autogen_exception -%> +package networksecurity_test +<% unless version == 'ga' -%> + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccNetworkSecuritySecurityProfileGroups_update(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + randomSuffix := acctest.RandString(t, 10) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckNetworkSecuritySecurityProfileGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecuritySecurityProfileGroups_basic(orgId, randomSuffix), + }, + { + ResourceName: "google_network_security_security_profile_group.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecuritySecurityProfileGroups_update(orgId, randomSuffix), + }, + { + ResourceName: "google_network_security_security_profile_group.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecuritySecurityProfileGroups_basic(orgId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_security_profile" "foobar" { + provider = google-beta + name = "tf-test-my-security-profile%s" + type = "THREAT_PREVENTION" + parent = "organizations/%s" + location = "global" +} + +resource "google_network_security_security_profile_group" "foobar" { + provider = google-beta + name = "tf-test-my-security-profile-group%s" + parent = "organizations/%s" + location = "global" + description = "My security profile group." 
+ threat_prevention_profile = google_network_security_security_profile.foobar.id + + labels = { + foo = "bar" + } +} +`, randomSuffix, orgId, randomSuffix, orgId) +} + +func testAccNetworkSecuritySecurityProfileGroups_update(orgId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_security_profile" "foobar" { + provider = google-beta + name = "tf-test-my-security-profile%s" + type = "THREAT_PREVENTION" + parent = "organizations/%s" + location = "global" +} + +resource "google_network_security_security_profile" "foobar_updated" { + provider = google-beta + name = "tf-test-my-security-profile-updated%s" + type = "THREAT_PREVENTION" + parent = "organizations/%s" + location = "global" +} + +resource "google_network_security_security_profile_group" "foobar" { + provider = google-beta + name = "tf-test-my-security-profile-group%s" + parent = "organizations/%s" + location = "global" + description = "My security profile group. Update" + threat_prevention_profile = google_network_security_security_profile.foobar_updated.id + + labels = { + foo = "foo" + } +} +`, randomSuffix, orgId, randomSuffix, orgId, randomSuffix, orgId) +} + +<% end -%> diff --git a/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_state_test.go.erb b/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_state_test.go.erb new file mode 100644 index 000000000000..ff99d9c7cc31 --- /dev/null +++ b/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_state_test.go.erb @@ -0,0 +1,85 @@ +<% autogen_exception -%> +package notebooks_test + +<% unless version == 'ga' %> +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNotebooksInstance_state(t *testing.T) { + t.Parallel() + + prefix := fmt.Sprintf("%d", acctest.RandInt(t)) + name := fmt.Sprintf("tf-%s", prefix) + + 
acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksInstance_basic_active(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image","desired_state", "update_time"}, + }, + { + Config: testAccNotebooksInstance_basic_stopped(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image","desired_state", "update_time"}, + }, + { + Config: testAccNotebooksInstance_basic_active(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image","desired_state", "update_time"}, + }, + }, + }) +} + +func testAccNotebooksInstance_basic_active(name string) string { + return fmt.Sprintf(` +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "e2-medium" + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } + desired_state = "ACTIVE" +} +`, name) +} + +func testAccNotebooksInstance_basic_stopped(name string) string { + return fmt.Sprintf(` +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "e2-medium" + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } + desired_state = "STOPPED" +} +`, name) +} +<% end -%> diff --git a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_featureview_test.go 
b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_featureview_test.go index 87d69f45c9fb..4c3eb97c736f 100644 --- a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_featureview_test.go +++ b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_featureview_test.go @@ -198,3 +198,291 @@ func testAccVertexAIFeatureOnlineStoreFeatureview_vertexAiFeatureonlinestoreFeat } `, context) } + +func TestAccVertexAIFeatureOnlineStoreFeatureview_vertexAiFeatureonlinestoreFeatureview_featureRegistry_updated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIFeatureOnlineStoreFeatureviewDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIFeatureOnlineStoreFeatureview_vertexAiFeatureonlinestoreFeatureview_featureRegistry_basic(context), + }, + { + ResourceName: "google_vertex_ai_feature_online_store_featureview.featureregistry_featureview", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "etag", "feature_online_store", "labels", "terraform_labels"}, + }, + { + Config: testAccVertexAIFeatureOnlineStoreFeatureview_vertexAiFeatureonlinestoreFeatureview_featureRegistry_update(context), + }, + { + ResourceName: "google_vertex_ai_feature_online_store_featureview.featureregistry_featureview", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "feature_online_store", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccVertexAIFeatureOnlineStoreFeatureview_vertexAiFeatureonlinestoreFeatureview_featureRegistry_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + resource 
"google_vertex_ai_feature_online_store" "featureregistry_featureonlinestore" { + name = "tf_test_featureonlinestore%{random_suffix}" + labels = { + foo = "bar" + } + region = "us-central1" + bigtable { + auto_scaling { + min_node_count = 1 + max_node_count = 2 + cpu_utilization_target = 80 + } + } + } + + resource "google_bigquery_dataset" "featureregistry-tf-test-dataset" { + + dataset_id = "tf_test_dataset1_featureview%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "US" + } + + resource "google_bigquery_table" "sample_table" { + deletion_protection = false + + dataset_id = google_bigquery_dataset.featureregistry-tf-test-dataset.dataset_id + table_id = "tf_test_bq_table%{random_suffix}" + schema = < +package workbench_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccWorkbenchInstance_shielded_config_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_shielded_config_false(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func 
TestAccWorkbenchInstance_shielded_config_remove(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_none(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_shielded_config_none(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_none(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", 
"terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_false(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_false(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_shielded_config_true(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + shielded_instance_config { + enable_secure_boot = true + enable_vtpm = true + enable_integrity_monitoring = true + } + } +} +`, context) +} + +func testAccWorkbenchInstance_shielded_config_false(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + 
gce_setup { + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = false + enable_integrity_monitoring = false + } + } + +} +`, context) +} + +func testAccWorkbenchInstance_shielded_config_none(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_gpu_test.go.erb b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb similarity index 77% rename from mmv1/third_party/terraform/services/workbench/resource_workbench_instance_gpu_test.go.erb rename to mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb index c1914881b2a6..8fbff0b8f080 100644 --- a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_gpu_test.go.erb +++ b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb @@ -65,6 +65,12 @@ resource "google_workbench_instance" "instance" { core_count = 1 } + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = true + enable_integrity_monitoring = false + } + metadata = { terraform = "true" } @@ -143,6 +149,12 @@ resource "google_workbench_instance" "instance" { core_count = 1 } + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = true + enable_integrity_monitoring = false + } + } labels = { @@ -270,3 +282,57 @@ resource "google_workbench_instance" "instance" { } `, context) } + +func TestAccWorkbenchInstance_updateState(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: 
testAccWorkbenchInstance_basic(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateState(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_basic(context), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_updateState(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + desired_state = "STOPPED" + +} +`, context) +} diff --git a/mmv1/third_party/terraform/tpgiamresource/iam.go.erb b/mmv1/third_party/terraform/tpgiamresource/iam.go.erb index 6dcb6c1ba8f7..ce9fce404baf 100644 --- a/mmv1/third_party/terraform/tpgiamresource/iam.go.erb +++ b/mmv1/third_party/terraform/tpgiamresource/iam.go.erb @@ -274,6 +274,11 @@ func normalizeIamMemberCasing(member string) string { if len(pieces) > 2 && !iamMemberIsCaseSensitive(strings.TrimPrefix(member, "deleted:")) { pieces[2] = strings.ToLower(pieces[2]) } + } else if strings.HasPrefix(member, "iamMember:") { + pieces = strings.SplitN(member, ":", 3) + if len(pieces) > 2 && !iamMemberIsCaseSensitive(strings.TrimPrefix(member, "iamMember:")) { + pieces[2] = strings.ToLower(pieces[2]) + } } else if 
!iamMemberIsCaseSensitive(member) { pieces = strings.SplitN(member, ":", 2) if len(pieces) > 1 { diff --git a/mmv1/third_party/terraform/website/docs/r/bigquery_dataset_iam.html.markdown b/mmv1/third_party/terraform/website/docs/r/bigquery_dataset_iam.html.markdown index e911137bdfae..4702defcded5 100644 --- a/mmv1/third_party/terraform/website/docs/r/bigquery_dataset_iam.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/bigquery_dataset_iam.html.markdown @@ -86,12 +86,13 @@ The following arguments are supported: * `member/members` - (Required) Identities that will be granted the privilege in `role`. Each entry can have one of the following values: - * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. - * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. - * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. - * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. + * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. + * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. + * **iamMember:{principal}**: Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. This is used for example for workload/workforce federated identities (principal, principalSet). 
+ * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. + * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. * `role` - (Required) The role that should be applied. Only one `google_bigquery_dataset_iam_binding` can be used per role. Note that custom roles must be of the format diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index 0eeb6da3b07d..4e124eb7a61b 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -251,8 +251,7 @@ is desired, you will need to modify your state file manually using * `provisioned_iops` - (Optional) Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. - Values must be between 10,000 and 120,000. For more details,see the - [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). + For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). Note: Updating currently is only supported for hyperdisk skus via disk update api/gcloud without the need to delete and recreate the disk, hyperdisk allows for an update of IOPS every 4 hours. To update your hyperdisk more frequently, @@ -260,10 +259,11 @@ is desired, you will need to modify your state file manually using * `provisioned_throughput` - (Optional) Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. - Values must be between 1 and 7,124. 
Note: Updating currently is only supported - for hyperdisk skus via disk update api/gcloud without the need to delete and - recreate the disk, hyperdisk allows for an update of throughput every 4 hours. - To update your hyperdisk more frequently, you'll need to manually delete and recreate it. + For more details, see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). + Note: Updating currently is only supported for hyperdisk skus via disk update + api/gcloud without the need to delete and recreate the disk, hyperdisk allows + for an update of throughput every 4 hours. To update your hyperdisk more + frequently, you'll need to manually delete and recreate it. * `enable_confidential_compute` - (Optional) Whether this disk is using confidential compute mode. Note: Only supported on hyperdisk skus, disk_encryption_key is required when setting to true. diff --git a/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown index 98af03c92186..92d2b5ab5b4f 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown @@ -204,7 +204,7 @@ The following arguments are supported: * `NORMAL` - Normal log level. * `VERBOSE` - Verbose log level. -* `user_ip_request_headers` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) An optional list of case-insensitive request header names to use for resolving the callers client IP address. +* `user_ip_request_headers` - (Optional) An optional list of case-insensitive request header names to use for resolving the caller's client IP address. 
The `json_custom_config` block supports: diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 9bff6ce9e01f..506516412fdc 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -618,6 +618,7 @@ This block also contains several computed attributes, documented below. The `advanced_datapath_observability_config` block supports: * `enable_metrics` - (Required) Whether or not to enable advanced datapath metrics. +* `enable_relay` - (Optional) Whether or not Relay is enabled. * `relay_mode` - (Optional) Mode used to make Relay available. The `maintenance_policy` block supports: @@ -1349,6 +1350,10 @@ exported: * `fleet.0.membership` - The resource name of the fleet Membership resource associated to this cluster with format `//gkehub.googleapis.com/projects/{{project}}/locations/{{location}}/memberships/{{name}}`. See the official doc for [fleet management](https://cloud.google.com/kubernetes-engine/docs/fleets-overview). +* `fleet.0.membership_id` - The short name of the fleet membership, extracted from `fleet.0.membership`. You can use this field to configure `membership_id` under [google_gkehub_feature_membership](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/gke_hub_feature_membership). + +* `fleet.0.membership_location` - The location of the fleet membership, extracted from `fleet.0.membership`. You can use this field to configure `membership_location` under [google_gkehub_feature_membership](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/gke_hub_feature_membership). + ## Timeouts This resource provides the following