diff --git a/.changelog/10138.txt b/.changelog/10138.txt new file mode 100644 index 00000000000..42b910df155 --- /dev/null +++ b/.changelog/10138.txt @@ -0,0 +1,3 @@ +```release-note:none + +``` \ No newline at end of file diff --git a/.teamcity/components/builds/build_configuration_per_package.kt b/.teamcity/components/builds/build_configuration_per_package.kt index 21d956030d6..395dc4da4a9 100644 --- a/.teamcity/components/builds/build_configuration_per_package.kt +++ b/.teamcity/components/builds/build_configuration_per_package.kt @@ -15,6 +15,8 @@ import jetbrains.buildServer.configs.kotlin.sharedResources import jetbrains.buildServer.configs.kotlin.vcs.GitVcsRoot import replaceCharsId +// BuildConfigurationsForPackages accepts a map containing details of multiple packages in a provider and returns a list of build configurations for them all. +// Intended to be used in projects where we're testing all packages, e.g. the nightly test projects fun BuildConfigurationsForPackages(packages: Map>, providerName: String, parentProjectName: String, vcsRoot: GitVcsRoot, sharedResources: List, environmentVariables: AccTestConfiguration): List { val list = ArrayList() @@ -31,6 +33,13 @@ fun BuildConfigurationsForPackages(packages: Map>, p return list } +// BuildConfigurationForSinglePackage accepts details of a single package in a provider and returns a build configuration for it +// Intended to be used in short-lived projects where we're testing specific packages, e.g. 
feature branch testing +fun BuildConfigurationForSinglePackage(packageName: String, packagePath: String, packageDisplayName: String, providerName: String, parentProjectName: String, vcsRoot: GitVcsRoot, sharedResources: List, environmentVariables: AccTestConfiguration): BuildType{ + val pkg = PackageDetails(packageName, packageDisplayName, providerName, parentProjectName) + return pkg.buildConfiguration(packagePath, vcsRoot, sharedResources, environmentVariables) +} + class PackageDetails(private val packageName: String, private val displayName: String, private val providerName: String, private val parentProjectName: String) { // buildConfiguration returns a BuildType for a service package @@ -102,4 +111,4 @@ class PackageDetails(private val packageName: String, private val displayName: S var id = "%s_%s_PACKAGE_%s".format(this.parentProjectName, this.providerName, this.packageName) return replaceCharsId(id) } -} \ No newline at end of file +} diff --git a/.teamcity/components/builds/build_parameters.kt b/.teamcity/components/builds/build_parameters.kt index 88928ed37a2..7641bc85859 100644 --- a/.teamcity/components/builds/build_parameters.kt +++ b/.teamcity/components/builds/build_parameters.kt @@ -252,12 +252,20 @@ fun ParametrizedWithType.readOnlySettings() { } // ParametrizedWithType.terraformCoreBinaryTesting sets environment variables that control what Terraform version is downloaded -// and ensures the testing framework uses that downloaded version -fun ParametrizedWithType.terraformCoreBinaryTesting() { - text("env.TERRAFORM_CORE_VERSION", DefaultTerraformCoreVersion, "The version of Terraform Core which should be used for testing") +// and ensures the testing framework uses that downloaded version. The default Terraform core version is used if no argument is supplied. 
+fun ParametrizedWithType.terraformCoreBinaryTesting(tfVersion: String = DefaultTerraformCoreVersion) { + text("env.TERRAFORM_CORE_VERSION", tfVersion, "The version of Terraform Core which should be used for testing") hiddenVariable("env.TF_ACC_TERRAFORM_PATH", "%system.teamcity.build.checkoutDir%/tools/terraform", "The path where the Terraform Binary is located. Used by the testing framework.") } +// BuildType.overrideTerraformCoreVersion is used to override the value of TERRAFORM_CORE_VERSION in special cases where we're testing new features +// that rely on a specific version of Terraform we might not want to be used for all our tests in TeamCity. +fun BuildType.overrideTerraformCoreVersion(tfVersion: String){ + params { + terraformCoreBinaryTesting(tfVersion) + } +} + fun ParametrizedWithType.terraformShouldPanicForSchemaErrors() { hiddenVariable("env.TF_SCHEMA_PANIC_ON_ERROR", "1", "Panic if unknown/unmatched fields are set into the state") } diff --git a/.teamcity/components/builds/build_steps.kt b/.teamcity/components/builds/build_steps.kt index 4faed1046b6..4682f1240eb 100644 --- a/.teamcity/components/builds/build_steps.kt +++ b/.teamcity/components/builds/build_steps.kt @@ -64,14 +64,15 @@ fun BuildSteps.downloadTerraformBinary() { // https://releases.hashicorp.com/terraform/0.12.28/terraform_0.12.28_linux_amd64.zip val terraformUrl = "https://releases.hashicorp.com/terraform/%env.TERRAFORM_CORE_VERSION%/terraform_%env.TERRAFORM_CORE_VERSION%_linux_amd64.zip" step(ScriptBuildStep { - name = "Download Terraform version %s".format(DefaultTerraformCoreVersion) + name = "Download Terraform" scriptContent = """ #!/bin/bash + echo "Downloading Terraform version %env.TERRAFORM_CORE_VERSION%" mkdir -p tools - wget -O tf.zip %s + wget -O tf.zip $terraformUrl unzip tf.zip mv terraform tools/ - """.format(terraformUrl).trimIndent() + """.trimIndent() }) } @@ -118,7 +119,7 @@ fun BuildSteps.runAcceptanceTests() { exit 0 fi - export TEST_COUNT=${'$'}(./test-binary 
-test.list=%TEST_PREFIX% | wc -l) + export TEST_COUNT=${'$'}(./test-binary -test.list="%TEST_PREFIX%" | wc -l) echo "Found ${'$'}{TEST_COUNT} tests that match the given test prefix %TEST_PREFIX%" if test ${'$'}TEST_COUNT -le "0"; then echo "Skipping test execution; no tests to run" diff --git a/.teamcity/components/inputs/packages.kt b/.teamcity/components/inputs/packages.kt index 5ea3d2a5d48..2e4b11ffb51 100644 --- a/.teamcity/components/inputs/packages.kt +++ b/.teamcity/components/inputs/packages.kt @@ -13,6 +13,11 @@ var PackagesListGa = mapOf( "displayName" to "Environment Variables", "path" to "./google/envvar" ), + "functions" to mapOf( + "name" to "functions", + "displayName" to "Provider-Defined Functions", + "path" to "./google/functions" + ), "fwmodels" to mapOf( "name" to "fwmodels", "displayName" to "Framework Models", @@ -64,6 +69,11 @@ var PackagesListBeta = mapOf( "displayName" to "Environment Variables", "path" to "./google-beta/envvar" ), + "functions" to mapOf( + "name" to "functions", + "displayName" to "Provider-Defined Functions", + "path" to "./google-beta/functions" + ), "fwmodels" to mapOf( "name" to "fwmodels", "displayName" to "Framework Models", diff --git a/.teamcity/components/inputs/services_beta.kt b/.teamcity/components/inputs/services_beta.kt index 27550dabce1..443987885a6 100644 --- a/.teamcity/components/inputs/services_beta.kt +++ b/.teamcity/components/inputs/services_beta.kt @@ -48,6 +48,11 @@ var ServicesListBeta = mapOf( "displayName" to "Appengine", "path" to "./google-beta/services/appengine" ), + "apphub" to mapOf( + "name" to "apphub", + "displayName" to "Apphub", + "path" to "./google-beta/services/apphub" + ), "artifactregistry" to mapOf( "name" to "artifactregistry", "displayName" to "Artifactregistry", diff --git a/.teamcity/components/inputs/services_ga.kt b/.teamcity/components/inputs/services_ga.kt index 51f98ed08a0..08ce6c2ee82 100644 --- a/.teamcity/components/inputs/services_ga.kt +++ 
b/.teamcity/components/inputs/services_ga.kt @@ -48,6 +48,11 @@ var ServicesListGa = mapOf( "displayName" to "Appengine", "path" to "./google/services/appengine" ), + "apphub" to mapOf( + "name" to "apphub", + "displayName" to "Apphub", + "path" to "./google/services/apphub" + ), "artifactregistry" to mapOf( "name" to "artifactregistry", "displayName" to "Artifactregistry", diff --git a/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-provider-functions.kt b/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-provider-functions.kt new file mode 100644 index 00000000000..3c1752e227f --- /dev/null +++ b/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-provider-functions.kt @@ -0,0 +1,102 @@ +/* + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file is controlled by MMv1, any changes made here will be overwritten + +package projects.feature_branches + +import ProviderNameBeta +import ProviderNameGa +import builds.* +import generated.PackagesListBeta +import generated.PackagesListGa +import jetbrains.buildServer.configs.kotlin.Project +import jetbrains.buildServer.configs.kotlin.vcs.GitVcsRoot +import replaceCharsId +import vcs_roots.ModularMagicianVCSRootBeta +import vcs_roots.ModularMagicianVCSRootGa + +const val featureBranchProviderFunctionsName = "FEATURE-BRANCH-provider-functions" +const val providerFunctionsTfCoreVersion = "1.8.0-alpha20240228" + +// VCS Roots specifically for pulling code from the feature branches in the downstream and upstream repos +object HashicorpVCSRootGa_featureBranchProviderFunctions: GitVcsRoot({ + name = "VCS root for the hashicorp/terraform-provider-${ProviderNameGa} repo @ refs/heads/${featureBranchProviderFunctionsName}" + url = "https://github.com/hashicorp/terraform-provider-${ProviderNameGa}" + branch = "refs/heads/${featureBranchProviderFunctionsName}" + branchSpec = "" // empty as we'll access no other branches +}) + +object 
HashicorpVCSRootBeta_featureBranchProviderFunctions: GitVcsRoot({ + name = "VCS root for the hashicorp/terraform-provider-${ProviderNameBeta} repo @ refs/heads/${featureBranchProviderFunctionsName}" + url = "https://github.com/hashicorp/terraform-provider-${ProviderNameBeta}" + branch = "refs/heads/${featureBranchProviderFunctionsName}" + branchSpec = "" // empty as we'll access no other branches +}) + +fun featureBranchProviderFunctionSubProject(allConfig: AllContextParameters): Project { + + val projectId = replaceCharsId(featureBranchProviderFunctionsName) + + val packageName = "functions" // This project will contain only builds to test this single package + val sharedResourcesEmpty: List = listOf() // No locking when testing functions + val vcrConfig = getVcrAcceptanceTestConfig(allConfig) // Reused below for both MM testing build configs + val trigger = NightlyTriggerConfiguration() // Resued below for running tests against the downstream repos every night. + + var parentId: String // To be overwritten when each build config is generated below. 
+ + // GA + val gaConfig = getGaAcceptanceTestConfig(allConfig) + // How to make only build configuration to the relevant package(s) + val functionPackageGa = PackagesListGa.getValue(packageName) + + // Enable testing using hashicorp/terraform-provider-google + parentId = "${projectId}_HC_GA" + val buildConfigHashiCorpGa = BuildConfigurationForSinglePackage(packageName, functionPackageGa.getValue("path"), "Provider-Defined Functions (GA provider, HashiCorp downstream)", ProviderNameGa, parentId, HashicorpVCSRootGa_featureBranchProviderFunctions, sharedResourcesEmpty, gaConfig) + buildConfigHashiCorpGa.addTrigger(trigger) + + // Enable testing using modular-magician/terraform-provider-google + parentId = "${projectId}_MM_GA" + val buildConfigModularMagicianGa = BuildConfigurationForSinglePackage(packageName, functionPackageGa.getValue("path"), "Provider-Defined Functions (GA provider, MM upstream)", ProviderNameGa, parentId, ModularMagicianVCSRootGa, sharedResourcesEmpty, vcrConfig) + + // Beta + val betaConfig = getBetaAcceptanceTestConfig(allConfig) + val functionPackageBeta = PackagesListBeta.getValue("functions") + + // Enable testing using hashicorp/terraform-provider-google-beta + parentId = "${projectId}_HC_BETA" + val buildConfigHashiCorpBeta = BuildConfigurationForSinglePackage(packageName, functionPackageBeta.getValue("path"), "Provider-Defined Functions (Beta provider, HashiCorp downstream)", ProviderNameBeta, parentId, HashicorpVCSRootBeta_featureBranchProviderFunctions, sharedResourcesEmpty, betaConfig) + buildConfigHashiCorpBeta.addTrigger(trigger) + + // Enable testing using modular-magician/terraform-provider-google-beta + parentId = "${projectId}_MM_BETA" + val buildConfigModularMagicianBeta = BuildConfigurationForSinglePackage(packageName, functionPackageBeta.getValue("path"), "Provider-Defined Functions (Beta provider, MM upstream)", ProviderNameBeta, parentId, ModularMagicianVCSRootBeta, sharedResourcesEmpty, vcrConfig) + + val allBuildConfigs = 
listOf(buildConfigHashiCorpGa, buildConfigModularMagicianGa, buildConfigHashiCorpBeta, buildConfigModularMagicianBeta) + + // Make these builds use a 1.8.0-ish version of TF core + allBuildConfigs.forEach{ b -> + b.overrideTerraformCoreVersion(providerFunctionsTfCoreVersion) + } + + return Project{ + id(projectId) + name = featureBranchProviderFunctionsName + description = "Subproject for testing feature branch $featureBranchProviderFunctionsName" + + // Register feature branch-specific VCS roots in the project + vcsRoot(HashicorpVCSRootGa_featureBranchProviderFunctions) + vcsRoot(HashicorpVCSRootBeta_featureBranchProviderFunctions) + + // Register all build configs in the project + allBuildConfigs.forEach{ b -> + buildType(b) + } + + params { + readOnlySettings() + } + } +} \ No newline at end of file diff --git a/.teamcity/components/projects/root_project.kt b/.teamcity/components/projects/root_project.kt index 0c130da8eca..a78260dfe65 100644 --- a/.teamcity/components/projects/root_project.kt +++ b/.teamcity/components/projects/root_project.kt @@ -18,6 +18,7 @@ import generated.ServicesListBeta import generated.ServicesListGa import jetbrains.buildServer.configs.kotlin.Project import jetbrains.buildServer.configs.kotlin.sharedResource +import projects.feature_branches.featureBranchProviderFunctionSubProject // googleCloudRootProject returns a root project that contains a subprojects for the GA and Beta version of the // Google provider. There are also resources to help manage the test projects used for acceptance tests. 
@@ -57,10 +58,14 @@ fun googleCloudRootProject(allConfig: AllContextParameters): Project { } } + // Projects required for nightly testing, testing MM upstreams, and sweepers subProject(googleSubProjectGa(allConfig)) subProject(googleSubProjectBeta(allConfig)) subProject(projectSweeperSubProject(allConfig)) + // Feature branch-testing projects - these will be added and removed as needed + subProject(featureBranchProviderFunctionSubProject(allConfig)) + params { readOnlySettings() } diff --git a/.teamcity/tests/nightly_tests_project.kt b/.teamcity/tests/nightly_tests_project.kt index 57764788a5a..af3d7b8a9b2 100644 --- a/.teamcity/tests/nightly_tests_project.kt +++ b/.teamcity/tests/nightly_tests_project.kt @@ -19,26 +19,13 @@ class NightlyTestProjectsTests { val project = googleCloudRootProject(testContextParameters()) // Find GA nightly test project - var gaProject: Project? = project.subProjects.find { p-> p.name == gaProjectName} - if (gaProject == null) { - Assert.fail("Could not find the Google (GA) project") - } - var gaNightlyTestProject: Project? = gaProject!!.subProjects.find { p-> p.name == nightlyTestsProjectName} - if (gaNightlyTestProject == null) { - Assert.fail("Could not find the Google (GA) Nightly Test project") - } + var gaNightlyTestProject = getSubProject(project, gaProjectName, nightlyTestsProjectName) // Find Beta nightly test project - var betaProject: Project? = project.subProjects.find { p-> p.name == betaProjectName} - if (betaProject == null) { - Assert.fail("Could not find the Google (Beta) project") - } - var betaNightlyTestProject: Project? 
= betaProject!!.subProjects.find { p-> p.name == nightlyTestsProjectName} - if (betaNightlyTestProject == null) { - Assert.fail("Could not find the Google (GA) Nightly Test project") - } + var betaNightlyTestProject = getSubProject(project, betaProjectName, nightlyTestsProjectName) - (gaNightlyTestProject!!.buildTypes + betaNightlyTestProject!!.buildTypes).forEach{bt -> + // Make assertions about builds in both nightly test projects + (gaNightlyTestProject.buildTypes + betaNightlyTestProject.buildTypes).forEach{bt -> assertTrue("Build configuration `${bt.name}` contains at least one trigger", bt.triggers.items.isNotEmpty()) // Look for at least one CRON trigger var found: Boolean = false diff --git a/.teamcity/tests/sweepers.kt b/.teamcity/tests/sweepers.kt index 1603aaeda77..c80b8ef7d00 100644 --- a/.teamcity/tests/sweepers.kt +++ b/.teamcity/tests/sweepers.kt @@ -18,7 +18,7 @@ import projects.googleCloudRootProject class SweeperTests { @Test - fun projectSweeperProjectDoesNotSkipProjectSweep() { + fun projectSweeperDoesNotSkipProjectSweep() { val project = googleCloudRootProject(testContextParameters()) // Find Project sweeper project @@ -37,30 +37,33 @@ class SweeperTests { } @Test - fun gaNightlyProjectServiceSweeperSkipsProjectSweep() { + fun serviceSweepersSkipProjectSweeper() { val project = googleCloudRootProject(testContextParameters()) // Find GA nightly test project - val gaProject: Project? = project.subProjects.find { p-> p.name == gaProjectName} - if (gaProject == null) { - Assert.fail("Could not find the Google (GA) project") - } - val gaNightlyTestProject: Project? 
= gaProject!!.subProjects.find { p-> p.name == nightlyTestsProjectName} - if (gaNightlyTestProject == null) { - Assert.fail("Could not find the Google (GA) Nightly Test project") - } + val gaNightlyTestProject = getSubProject(project, gaProjectName, nightlyTestsProjectName) + // Find GA MM Upstream project + val gaMmUpstreamProject = getSubProject(project, gaProjectName, mmUpstreamProjectName) - // Find sweeper inside - val sweeper: BuildType? = gaNightlyTestProject!!.buildTypes.find { p-> p.name == ServiceSweeperName} - if (sweeper == null) { - Assert.fail("Could not find the sweeper build in the Google (GA) Nightly Test project") + // Find Beta nightly test project + val betaNightlyTestProject = getSubProject(project, betaProjectName, nightlyTestsProjectName) + // Find Beta MM Upstream project + val betaMmUpstreamProject = getSubProject(project, betaProjectName, mmUpstreamProjectName) + + val allProjects: ArrayList = arrayListOf(gaNightlyTestProject, gaMmUpstreamProject, betaNightlyTestProject, betaMmUpstreamProject) + allProjects.forEach{ project -> + // Find sweeper inside + val sweeper: BuildType? = project.buildTypes.find { p-> p.name == ServiceSweeperName} + if (sweeper == null) { + Assert.fail("Could not find the sweeper build in the ${project.name} project") + } + + // For the project sweeper to be skipped, SKIP_PROJECT_SWEEPER needs a value + // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 + + val value = sweeper!!.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value + assertTrue("env.SKIP_PROJECT_SWEEPER is set to a non-empty string in the sweeper build in the ${project.name} project. This means project sweepers are skipped. 
Value = `${value}` ", value != "") } - - // For the project sweeper to be skipped, SKIP_PROJECT_SWEEPER needs a value - // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 - - val value = sweeper!!.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value - assertTrue("env.SKIP_PROJECT_SWEEPER is set to a non-empty string, so project sweepers are skipped. Value = `${value}` ", value != "") } @Test @@ -68,14 +71,8 @@ class SweeperTests { val project = googleCloudRootProject(testContextParameters()) // Find GA nightly test project - val gaProject: Project? = project.subProjects.find { p-> p.name == gaProjectName} - if (gaProject == null) { - Assert.fail("Could not find the Google (GA) project") - } - val gaNightlyTestProject: Project? = gaProject!!.subProjects.find { p-> p.name == nightlyTestsProjectName} - if (gaNightlyTestProject == null) { - Assert.fail("Could not find the Google (GA) Nightly Test project") - } + val gaNightlyTestProject = getSubProject(project, gaProjectName, nightlyTestsProjectName) + // Find sweeper inside val sweeper: BuildType? = gaNightlyTestProject!!.buildTypes.find { p-> p.name == ServiceSweeperName} @@ -93,41 +90,7 @@ class SweeperTests { val project = googleCloudRootProject(testContextParameters()) // Find Beta nightly test project - val betaProject: Project? = project.subProjects.find { p-> p.name == betaProjectName} - if (betaProject == null) { - Assert.fail("Could not find the Google (GA) project") - } - val betaNightlyTestProject: Project? = betaProject!!.subProjects.find { p-> p.name == nightlyTestsProjectName} - if (betaNightlyTestProject == null) { - Assert.fail("Could not find the Google (GA) Nightly Test project") - } - - // Find sweeper inside - val sweeper: BuildType? 
= betaNightlyTestProject!!.buildTypes.find { p-> p.name == ServiceSweeperName} - if (sweeper == null) { - Assert.fail("Could not find the sweeper build in the Google (GA) Nightly Test project") - } - - // For the project sweeper to be skipped, SKIP_PROJECT_SWEEPER needs a value - // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 - - val value = sweeper!!.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value - assertTrue("env.SKIP_PROJECT_SWEEPER is set to a non-empty string, so project sweepers are skipped. Value = `${value}` ", value != "") - } - - @Test - fun betaNightlyProjectServiceSweeperRunsInGoogleBeta() { - val project = googleCloudRootProject(testContextParameters()) - - // Find Beta nightly test project - val betaProject: Project? = project.subProjects.find { p-> p.name == betaProjectName} - if (betaProject == null) { - Assert.fail("Could not find the Google (GA) project") - } - val betaNightlyTestProject: Project? = betaProject!!.subProjects.find { p-> p.name == nightlyTestsProjectName} - if (betaNightlyTestProject == null) { - Assert.fail("Could not find the Google (GA) Nightly Test project") - } + val betaNightlyTestProject = getSubProject(project, betaProjectName, nightlyTestsProjectName) // Find sweeper inside val sweeper: BuildType? 
= betaNightlyTestProject!!.buildTypes.find { p-> p.name == ServiceSweeperName} diff --git a/.teamcity/tests/test_utils.kt b/.teamcity/tests/test_utils.kt index 74738449644..576e4b735c0 100644 --- a/.teamcity/tests/test_utils.kt +++ b/.teamcity/tests/test_utils.kt @@ -8,10 +8,13 @@ package tests import builds.AllContextParameters +import jetbrains.buildServer.configs.kotlin.Project +import org.junit.Assert const val gaProjectName = "Google" const val betaProjectName = "Google Beta" const val nightlyTestsProjectName = "Nightly Tests" +const val mmUpstreamProjectName = "MM Upstream Testing" const val projectSweeperProjectName = "Project Sweeper" fun testContextParameters(): AllContextParameters { @@ -49,4 +52,19 @@ fun testContextParameters(): AllContextParameters { "zone", "infraProject", "vcrBucketName") +} + +fun getSubProject(rootProject: Project, parentProjectName: String, subProjectName: String): Project { + // Find parent project within root + var parentProject: Project? = rootProject.subProjects.find { p-> p.name == parentProjectName} + if (parentProject == null) { + Assert.fail("Could not find the $parentProjectName project") + } + // Find subproject within parent identified above + var subProject: Project? = parentProject!!.subProjects.find { p-> p.name == subProjectName} + if (subProject == null) { + Assert.fail("Could not find the $subProjectName project") + } + + return subProject!! } \ No newline at end of file diff --git a/META.d/_summary.yaml b/META.d/_summary.yaml index c3dc9c1febb..cef4b73691f 100644 --- a/META.d/_summary.yaml +++ b/META.d/_summary.yaml @@ -7,6 +7,6 @@ partition: tf-ecosystem summary: owner: team-tf-hybrid-cloud description: | - The Terraform Google provider is a plugin that allows Terraform to manage resources on Google Cloud Platform. + The Terraform provider for Google Cloud is a plugin that allows Terraform to manage resources on Google Cloud. 
- visibility: external \ No newline at end of file + visibility: external diff --git a/go.mod b/go.mod index fbd750b3d75..27fa208c6bb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( cloud.google.com/go/bigtable v1.19.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.62.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.63.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 @@ -24,21 +24,21 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 github.com/sirupsen/logrus v1.8.1 - golang.org/x/net v0.20.0 - golang.org/x/oauth2 v0.16.0 - google.golang.org/api v0.156.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 - google.golang.org/grpc v1.60.1 + golang.org/x/net v0.21.0 + golang.org/x/oauth2 v0.17.0 + google.golang.org/api v0.167.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 + google.golang.org/grpc v1.61.1 google.golang.org/protobuf v1.32.0 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.111.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go/compute v1.23.4 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect + cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -47,7 +47,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/cncf/xds/go 
v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect github.com/envoyproxy/go-control-plane v0.11.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/fatih/color v1.13.0 // indirect @@ -61,9 +61,9 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.1 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect @@ -90,19 +90,19 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/crypto v0.18.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.0 // indirect + go.opentelemetry.io/otel/metric v1.23.0 // indirect + go.opentelemetry.io/otel/trace v1.23.0 // indirect + golang.org/x/crypto v0.19.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // 
indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect + google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index b265f1fc359..63d8188cf48 100644 --- a/go.sum +++ b/go.sum @@ -1,18 +1,18 @@ bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go/bigtable v1.19.0 h1:wiq9LT0kukfInzvy1joMDijCw/OD1UChpSbORXYn0LI= cloud.google.com/go/bigtable v1.19.0/go.mod h1:xl5kPa8PTkJjdBxg6qdGH88464nNqmbISHSRU+D2yFE= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= +cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= 
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.62.0 h1:s4Y6r6RrYLBnqosGXLwR0h1Gqr0VT3wgd6rqvHsD9OE= @@ -43,8 +43,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -120,12 +120,12 @@ github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932/go.mod h1:cC6EdPbj/1 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= +github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -249,17 +249,17 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= +go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= +go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= +go.opentelemetry.io/otel/metric v1.23.0/go.mod 
h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= +go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -269,8 +269,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -295,11 +295,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 
h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -328,14 +328,14 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= 
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -363,8 +363,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ= -google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY= +google.golang.org/api v0.167.0 h1:CKHrQD1BLRii6xdkatBDXyKzM0mkawt2QP+H3LtPmSE= +google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -373,20 +373,20 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod 
h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= +google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= +google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= +google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc 
v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -413,3 +413,5 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.63.0 h1:eSOBYPZVnU2fZul9sAJFGLVCgv6stNVKkmsogKF7UeY= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.63.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= diff --git a/google/fwmodels/provider_model.go b/google/fwmodels/provider_model.go index 5d54d840b6c..55897f70d01 100644 --- a/google/fwmodels/provider_model.go +++ b/google/fwmodels/provider_model.go @@ -34,6 +34,7 @@ type ProviderModel struct { AlloydbCustomEndpoint types.String `tfsdk:"alloydb_custom_endpoint"` ApigeeCustomEndpoint types.String `tfsdk:"apigee_custom_endpoint"` AppEngineCustomEndpoint types.String `tfsdk:"app_engine_custom_endpoint"` + ApphubCustomEndpoint types.String `tfsdk:"apphub_custom_endpoint"` ArtifactRegistryCustomEndpoint types.String 
`tfsdk:"artifact_registry_custom_endpoint"` BeyondcorpCustomEndpoint types.String `tfsdk:"beyondcorp_custom_endpoint"` BiglakeCustomEndpoint types.String `tfsdk:"biglake_custom_endpoint"` diff --git a/google/fwprovider/framework_provider.go b/google/fwprovider/framework_provider.go index 2e63425e375..f7fee241433 100644 --- a/google/fwprovider/framework_provider.go +++ b/google/fwprovider/framework_provider.go @@ -181,6 +181,12 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, + "apphub_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, "artifact_registry_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ diff --git a/google/fwtransport/framework_config.go b/google/fwtransport/framework_config.go index 7449d664555..89090b8c1c9 100644 --- a/google/fwtransport/framework_config.go +++ b/google/fwtransport/framework_config.go @@ -57,6 +57,7 @@ type FrameworkProviderConfig struct { AlloydbBasePath string ApigeeBasePath string AppEngineBasePath string + ApphubBasePath string ArtifactRegistryBasePath string BeyondcorpBasePath string BiglakeBasePath string @@ -208,6 +209,7 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.AlloydbBasePath = data.AlloydbCustomEndpoint.ValueString() p.ApigeeBasePath = data.ApigeeCustomEndpoint.ValueString() p.AppEngineBasePath = data.AppEngineCustomEndpoint.ValueString() + p.ApphubBasePath = data.ApphubCustomEndpoint.ValueString() p.ArtifactRegistryBasePath = data.ArtifactRegistryCustomEndpoint.ValueString() p.BeyondcorpBasePath = data.BeyondcorpCustomEndpoint.ValueString() p.BiglakeBasePath = data.BiglakeCustomEndpoint.ValueString() @@ -489,6 +491,14 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.AppEngineCustomEndpoint = 
types.StringValue(customEndpoint.(string)) } } + if data.ApphubCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_APPHUB_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ApphubBasePathKey]) + if customEndpoint != nil { + data.ApphubCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } if data.ArtifactRegistryCustomEndpoint.IsNull() { customEndpoint := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_ARTIFACT_REGISTRY_CUSTOM_ENDPOINT", diff --git a/google/provider/provider.go b/google/provider/provider.go index 43ac24f8e84..6d13d1a1d73 100644 --- a/google/provider/provider.go +++ b/google/provider/provider.go @@ -175,6 +175,11 @@ func Provider() *schema.Provider { Optional: true, ValidateFunc: transport_tpg.ValidateCustomEndpoint, }, + "apphub_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, "artifact_registry_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -846,12 +851,12 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr // Check if the user provided a value from the universe_domain field other than the default if v, ok := d.GetOk("universe_domain"); ok && v.(string) != "googleapis.com" { if config.UniverseDomain == "" { - return nil, diag.FromErr(fmt.Errorf("Universe domain '%s' supplied directly to Terraform with no matching universe domain in credentials. Credentials with no 'universe_domain' set are assumed to be in the default universe.", v)) + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' supplied directly to Terraform with no matching universe domain in credentials. 
Credentials with no 'universe_domain' set are assumed to be in the default universe.", v)) } else if v.(string) != config.UniverseDomain { if _, err := os.Stat(config.Credentials); err == nil { - return nil, diag.FromErr(fmt.Errorf("'%s' does not match the universe domain '%s' already set in the credential file '%s'. The 'universe_domain' provider configuration can not be used to override the universe domain that is defined in the active credential. Set the 'universe_domain' provider configuration when universe domain information is not already available in the credential, e.g. when authenticating with a JWT token.", v, config.UniverseDomain, config.Credentials)) + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' does not match the universe domain '%s' already set in the credential file '%s'. The 'universe_domain' provider configuration can not be used to override the universe domain that is defined in the active credential. Set the 'universe_domain' provider configuration when universe domain information is not already available in the credential, e.g. when authenticating with a JWT token.", v, config.UniverseDomain, config.Credentials)) } else { - return nil, diag.FromErr(fmt.Errorf("'%s' does not match the universe domain '%s' supplied directly to Terraform. The 'universe_domain' provider configuration can not be used to override the universe domain that is defined in the active credential. Set the 'universe_domain' provider configuration when universe domain information is not already available in the credential, e.g. when authenticating with a JWT token.", v, config.UniverseDomain)) + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' does not match the universe domain '%s' supplied directly to Terraform. The 'universe_domain' provider configuration can not be used to override the universe domain that is defined in the active credential. 
Set the 'universe_domain' provider configuration when universe domain information is not already available in the credential, e.g. when authenticating with a JWT token.", v, config.UniverseDomain)) } } } @@ -925,6 +930,7 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.AlloydbBasePath = d.Get("alloydb_custom_endpoint").(string) config.ApigeeBasePath = d.Get("apigee_custom_endpoint").(string) config.AppEngineBasePath = d.Get("app_engine_custom_endpoint").(string) + config.ApphubBasePath = d.Get("apphub_custom_endpoint").(string) config.ArtifactRegistryBasePath = d.Get("artifact_registry_custom_endpoint").(string) config.BeyondcorpBasePath = d.Get("beyondcorp_custom_endpoint").(string) config.BiglakeBasePath = d.Get("biglake_custom_endpoint").(string) diff --git a/google/provider/provider_mmv1_resources.go b/google/provider/provider_mmv1_resources.go index f667cb7c431..5baf4a7447f 100644 --- a/google/provider/provider_mmv1_resources.go +++ b/google/provider/provider_mmv1_resources.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/alloydb" "github.com/hashicorp/terraform-provider-google/google/services/apigee" "github.com/hashicorp/terraform-provider-google/google/services/appengine" + "github.com/hashicorp/terraform-provider-google/google/services/apphub" "github.com/hashicorp/terraform-provider-google/google/services/artifactregistry" "github.com/hashicorp/terraform-provider-google/google/services/beyondcorp" "github.com/hashicorp/terraform-provider-google/google/services/biglake" @@ -299,7 +300,9 @@ var generatedIAMDatasources = map[string]*schema.Resource{ "google_bigquery_datapolicy_data_policy_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamSchema, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamUpdaterProducer), "google_binary_authorization_attestor_iam_policy": 
tpgiamresource.DataSourceIamPolicy(binaryauthorization.BinaryAuthorizationAttestorIamSchema, binaryauthorization.BinaryAuthorizationAttestorIamUpdaterProducer), "google_cloudbuildv2_connection_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer), + "google_clouddeploy_custom_target_type_iam_policy": tpgiamresource.DataSourceIamPolicy(clouddeploy.ClouddeployCustomTargetTypeIamSchema, clouddeploy.ClouddeployCustomTargetTypeIamUpdaterProducer), "google_clouddeploy_delivery_pipeline_iam_policy": tpgiamresource.DataSourceIamPolicy(clouddeploy.ClouddeployDeliveryPipelineIamSchema, clouddeploy.ClouddeployDeliveryPipelineIamUpdaterProducer), + "google_clouddeploy_target_iam_policy": tpgiamresource.DataSourceIamPolicy(clouddeploy.ClouddeployTargetIamSchema, clouddeploy.ClouddeployTargetIamUpdaterProducer), "google_cloudfunctions_function_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer), "google_cloudfunctions2_function_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudfunctions2.Cloudfunctions2functionIamSchema, cloudfunctions2.Cloudfunctions2functionIamUpdaterProducer), "google_cloud_run_service_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudrun.CloudRunServiceIamSchema, cloudrun.CloudRunServiceIamUpdaterProducer), @@ -386,9 +389,9 @@ var handwrittenIAMDatasources = map[string]*schema.Resource{ } // Resources -// Generated resources: 386 -// Generated IAM resources: 225 -// Total generated resources: 611 +// Generated resources: 390 +// Generated IAM resources: 231 +// Total generated resources: 621 var generatedResources = map[string]*schema.Resource{ "google_folder_access_approval_settings": accessapproval.ResourceAccessApprovalFolderSettings(), "google_organization_access_approval_settings": accessapproval.ResourceAccessApprovalOrganizationSettings(), @@ 
-439,6 +442,7 @@ var generatedResources = map[string]*schema.Resource{ "google_app_engine_service_network_settings": appengine.ResourceAppEngineServiceNetworkSettings(), "google_app_engine_service_split_traffic": appengine.ResourceAppEngineServiceSplitTraffic(), "google_app_engine_standard_app_version": appengine.ResourceAppEngineStandardAppVersion(), + "google_apphub_application": apphub.ResourceApphubApplication(), "google_artifact_registry_repository": artifactregistry.ResourceArtifactRegistryRepository(), "google_artifact_registry_repository_iam_binding": tpgiamresource.ResourceIamBinding(artifactregistry.ArtifactRegistryRepositoryIamSchema, artifactregistry.ArtifactRegistryRepositoryIamUpdaterProducer, artifactregistry.ArtifactRegistryRepositoryIdParseFunc), "google_artifact_registry_repository_iam_member": tpgiamresource.ResourceIamMember(artifactregistry.ArtifactRegistryRepositoryIamSchema, artifactregistry.ArtifactRegistryRepositoryIamUpdaterProducer, artifactregistry.ArtifactRegistryRepositoryIdParseFunc), @@ -500,10 +504,17 @@ var generatedResources = map[string]*schema.Resource{ "google_cloudbuildv2_connection_iam_member": tpgiamresource.ResourceIamMember(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer, cloudbuildv2.Cloudbuildv2ConnectionIdParseFunc), "google_cloudbuildv2_connection_iam_policy": tpgiamresource.ResourceIamPolicy(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer, cloudbuildv2.Cloudbuildv2ConnectionIdParseFunc), "google_cloudbuildv2_repository": cloudbuildv2.ResourceCloudbuildv2Repository(), + "google_clouddeploy_automation": clouddeploy.ResourceClouddeployAutomation(), "google_clouddeploy_custom_target_type": clouddeploy.ResourceClouddeployCustomTargetType(), + "google_clouddeploy_custom_target_type_iam_binding": tpgiamresource.ResourceIamBinding(clouddeploy.ClouddeployCustomTargetTypeIamSchema, 
clouddeploy.ClouddeployCustomTargetTypeIamUpdaterProducer, clouddeploy.ClouddeployCustomTargetTypeIdParseFunc), + "google_clouddeploy_custom_target_type_iam_member": tpgiamresource.ResourceIamMember(clouddeploy.ClouddeployCustomTargetTypeIamSchema, clouddeploy.ClouddeployCustomTargetTypeIamUpdaterProducer, clouddeploy.ClouddeployCustomTargetTypeIdParseFunc), + "google_clouddeploy_custom_target_type_iam_policy": tpgiamresource.ResourceIamPolicy(clouddeploy.ClouddeployCustomTargetTypeIamSchema, clouddeploy.ClouddeployCustomTargetTypeIamUpdaterProducer, clouddeploy.ClouddeployCustomTargetTypeIdParseFunc), "google_clouddeploy_delivery_pipeline_iam_binding": tpgiamresource.ResourceIamBinding(clouddeploy.ClouddeployDeliveryPipelineIamSchema, clouddeploy.ClouddeployDeliveryPipelineIamUpdaterProducer, clouddeploy.ClouddeployDeliveryPipelineIdParseFunc), "google_clouddeploy_delivery_pipeline_iam_member": tpgiamresource.ResourceIamMember(clouddeploy.ClouddeployDeliveryPipelineIamSchema, clouddeploy.ClouddeployDeliveryPipelineIamUpdaterProducer, clouddeploy.ClouddeployDeliveryPipelineIdParseFunc), "google_clouddeploy_delivery_pipeline_iam_policy": tpgiamresource.ResourceIamPolicy(clouddeploy.ClouddeployDeliveryPipelineIamSchema, clouddeploy.ClouddeployDeliveryPipelineIamUpdaterProducer, clouddeploy.ClouddeployDeliveryPipelineIdParseFunc), + "google_clouddeploy_target_iam_binding": tpgiamresource.ResourceIamBinding(clouddeploy.ClouddeployTargetIamSchema, clouddeploy.ClouddeployTargetIamUpdaterProducer, clouddeploy.ClouddeployTargetIdParseFunc), + "google_clouddeploy_target_iam_member": tpgiamresource.ResourceIamMember(clouddeploy.ClouddeployTargetIamSchema, clouddeploy.ClouddeployTargetIamUpdaterProducer, clouddeploy.ClouddeployTargetIdParseFunc), + "google_clouddeploy_target_iam_policy": tpgiamresource.ResourceIamPolicy(clouddeploy.ClouddeployTargetIamSchema, clouddeploy.ClouddeployTargetIamUpdaterProducer, clouddeploy.ClouddeployTargetIdParseFunc), 
"google_clouddomains_registration": clouddomains.ResourceClouddomainsRegistration(), "google_cloudfunctions_function_iam_binding": tpgiamresource.ResourceIamBinding(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer, cloudfunctions.CloudFunctionsCloudFunctionIdParseFunc), "google_cloudfunctions_function_iam_member": tpgiamresource.ResourceIamMember(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer, cloudfunctions.CloudFunctionsCloudFunctionIdParseFunc), @@ -727,6 +738,7 @@ var generatedResources = map[string]*schema.Resource{ "google_filestore_snapshot": filestore.ResourceFilestoreSnapshot(), "google_firebase_app_check_app_attest_config": firebaseappcheck.ResourceFirebaseAppCheckAppAttestConfig(), "google_firebase_app_check_debug_token": firebaseappcheck.ResourceFirebaseAppCheckDebugToken(), + "google_firebase_app_check_device_check_config": firebaseappcheck.ResourceFirebaseAppCheckDeviceCheckConfig(), "google_firebase_app_check_play_integrity_config": firebaseappcheck.ResourceFirebaseAppCheckPlayIntegrityConfig(), "google_firebase_app_check_recaptcha_enterprise_config": firebaseappcheck.ResourceFirebaseAppCheckRecaptchaEnterpriseConfig(), "google_firebase_app_check_recaptcha_v3_config": firebaseappcheck.ResourceFirebaseAppCheckRecaptchaV3Config(), @@ -821,6 +833,7 @@ var generatedResources = map[string]*schema.Resource{ "google_integration_connectors_endpoint_attachment": integrationconnectors.ResourceIntegrationConnectorsEndpointAttachment(), "google_kms_crypto_key": kms.ResourceKMSCryptoKey(), "google_kms_crypto_key_version": kms.ResourceKMSCryptoKeyVersion(), + "google_kms_ekm_connection": kms.ResourceKMSEkmConnection(), "google_kms_key_ring": kms.ResourceKMSKeyRing(), "google_kms_key_ring_import_job": kms.ResourceKMSKeyRingImportJob(), "google_kms_secret_ciphertext": kms.ResourceKMSSecretCiphertext(), diff --git 
a/google/provider/universe/universe_domain_compute_test.go b/google/provider/universe/universe_domain_compute_test.go index 0749f559731..a010e3de094 100644 --- a/google/provider/universe/universe_domain_compute_test.go +++ b/google/provider/universe/universe_domain_compute_test.go @@ -59,7 +59,7 @@ func TestAccDefaultUniverseDomain_doesNotMatchExplicit(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testAccUniverseDomain_basic_disk(universeDomainFake), - ExpectError: regexp.MustCompile("supplied directly to Terraform with no matching universe domain in credentials"), + ExpectError: regexp.MustCompile("Universe domain mismatch"), }, }, }) diff --git a/google/services/apigee/resource_apigee_environment_type_test.go b/google/services/apigee/resource_apigee_environment_type_test.go new file mode 100644 index 00000000000..e063eb4fd21 --- /dev/null +++ b/google/services/apigee/resource_apigee_environment_type_test.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package apigee_test diff --git a/google/services/appengine/resource_app_engine_application_url_dispatch_rules_generated_test.go b/google/services/appengine/resource_app_engine_application_url_dispatch_rules_generated_test.go index b3083b10d08..5c6f3e6113a 100644 --- a/google/services/appengine/resource_app_engine_application_url_dispatch_rules_generated_test.go +++ b/google/services/appengine/resource_app_engine_application_url_dispatch_rules_generated_test.go @@ -71,7 +71,7 @@ resource "google_app_engine_application_url_dispatch_rules" "web_service" { resource "google_app_engine_standard_app_version" "admin_v3" { version_id = "v3" service = "admin" - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" diff --git a/google/services/appengine/resource_app_engine_service_network_settings_generated_test.go b/google/services/appengine/resource_app_engine_service_network_settings_generated_test.go index 
08e6327d843..8ce549c33e0 100644 --- a/google/services/appengine/resource_app_engine_service_network_settings_generated_test.go +++ b/google/services/appengine/resource_app_engine_service_network_settings_generated_test.go @@ -66,7 +66,7 @@ resource "google_app_engine_standard_app_version" "internalapp" { service = "internalapp" delete_service_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } diff --git a/google/services/appengine/resource_app_engine_service_network_settings_test.go b/google/services/appengine/resource_app_engine_service_network_settings_test.go index d5922345fcb..7b63c5b28a0 100644 --- a/google/services/appengine/resource_app_engine_service_network_settings_test.go +++ b/google/services/appengine/resource_app_engine_service_network_settings_test.go @@ -58,7 +58,7 @@ resource "google_app_engine_standard_app_version" "app" { service = "app-%{random_suffix}" delete_service_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } @@ -98,7 +98,7 @@ resource "google_app_engine_standard_app_version" "app" { service = "app-%{random_suffix}" delete_service_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } diff --git a/google/services/appengine/resource_app_engine_service_split_traffic_generated_test.go b/google/services/appengine/resource_app_engine_service_split_traffic_generated_test.go index a863eaeec64..22e6ba1b88e 100644 --- a/google/services/appengine/resource_app_engine_service_split_traffic_generated_test.go +++ b/google/services/appengine/resource_app_engine_service_split_traffic_generated_test.go @@ -69,7 +69,7 @@ resource "google_app_engine_standard_app_version" "liveapp_v1" { service = "liveapp" delete_service_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } @@ -88,7 +88,7 @@ resource "google_app_engine_standard_app_version" "liveapp_v2" { 
service = "liveapp" noop_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } diff --git a/google/services/appengine/resource_app_engine_standard_app_version_generated_test.go b/google/services/appengine/resource_app_engine_standard_app_version_generated_test.go index 8c2e79407db..50224bd6199 100644 --- a/google/services/appengine/resource_app_engine_standard_app_version_generated_test.go +++ b/google/services/appengine/resource_app_engine_standard_app_version_generated_test.go @@ -77,7 +77,7 @@ resource "google_project_iam_member" "storage_viewer" { resource "google_app_engine_standard_app_version" "myapp_v1" { version_id = "v1" service = "myapp" - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" @@ -114,7 +114,7 @@ resource "google_app_engine_standard_app_version" "myapp_v1" { resource "google_app_engine_standard_app_version" "myapp_v2" { version_id = "v2" service = "myapp" - runtime = "nodejs10" + runtime = "nodejs20" app_engine_apis = true entrypoint { diff --git a/google/services/appengine/resource_app_engine_standard_app_version_test.go b/google/services/appengine/resource_app_engine_standard_app_version_test.go index 1736f960d54..0eebd1c7baa 100644 --- a/google/services/appengine/resource_app_engine_standard_app_version_test.go +++ b/google/services/appengine/resource_app_engine_standard_app_version_test.go @@ -22,7 +22,10 @@ func TestAccAppEngineStandardAppVersion_update(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckAppEngineStandardAppVersionDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckAppEngineStandardAppVersionDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccAppEngineStandardAppVersion_python(context), @@ -163,11 +166,20 @@ resource 
"google_project_service" "project" { disable_dependent_services = false } +resource "time_sleep" "wait_60_seconds" { + depends_on = [google_project.my_project] + + create_duration = "60s" +} + resource "google_project_service" "vpcaccess_api" { project = google_project.my_project.project_id service = "vpcaccess.googleapis.com" disable_dependent_services = false + + # Needed for CI tests for permissions to propagate, should not be needed for actual usage + depends_on = [time_sleep.wait_60_seconds] } resource "google_vpc_access_connector" "bar" { diff --git a/google/services/apphub/apphub_operation.go b/google/services/apphub/apphub_operation.go new file mode 100644 index 00000000000..662ee7c875e --- /dev/null +++ b/google/services/apphub/apphub_operation.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apphub + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ApphubOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *ApphubOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.ApphubBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createApphubWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*ApphubOperationWaiter, error) { + w := &ApphubOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ApphubOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createApphubWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + rawResponse := []byte(w.CommonOperationWaiter.Op.Response) + if len(rawResponse) == 0 { + return errors.New("`resource` not set in operation response") + } + return json.Unmarshal(rawResponse, response) +} + +func ApphubOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createApphubWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/google/services/apphub/resource_apphub_application.go b/google/services/apphub/resource_apphub_application.go new file mode 100644 index 00000000000..71213424514 --- /dev/null +++ b/google/services/apphub/resource_apphub_application.go @@ -0,0 +1,1003 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apphub + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceApphubApplication() *schema.Resource { + return &schema.Resource{ + Create: resourceApphubApplicationCreate, + Read: resourceApphubApplicationRead, + Update: resourceApphubApplicationUpdate, + Delete: resourceApphubApplicationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApphubApplicationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + 
tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "application_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. The Application identifier.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Part of 'parent'. See documentation of 'projectsId'.`, + }, + "scope": { + Type: schema.TypeList, + Required: true, + Description: `Scope of an application.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"REGIONAL"}), + Description: `Required. Scope Type. + Possible values: +REGIONAL Possible values: ["REGIONAL"]`, + }, + }, + }, + }, + "attributes": { + Type: schema.TypeList, + Optional: true, + Description: `Consumer provided attributes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "business_owners": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Business team that ensures user needs are met and value is delivered`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + Description: `Required. Email address of the contacts.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. Contact's name.`, + }, + }, + }, + }, + "criticality": { + Type: schema.TypeList, + Optional: true, + Description: `Criticality of the Application, Service, or Workload`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MISSION_CRITICAL", "HIGH", "MEDIUM", "LOW"}), + Description: `Criticality type. 
Possible values: ["MISSION_CRITICAL", "HIGH", "MEDIUM", "LOW"]`, + }, + }, + }, + }, + "developer_owners": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Developer team that owns development and coding.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + Description: `Required. Email address of the contacts.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. Contact's name.`, + }, + }, + }, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Description: `Environment of the Application, Service, or Workload`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"PRODUCTION", "STAGING", "TEST", "DEVELOPMENT"}), + Description: `Environment type. Possible values: ["PRODUCTION", "STAGING", "TEST", "DEVELOPMENT"]`, + }, + }, + }, + }, + "operator_owners": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Operator team that ensures runtime and operations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + Description: `Required. Email address of the contacts.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. Contact's name.`, + }, + }, + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. User-defined description of an Application.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. User-defined name for the Application.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Create time.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Identifier. 
The resource name of an Application. Format: +"projects/{host-project-id}/locations/{location}/applications/{application-id}"`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Application state. + Possible values: + STATE_UNSPECIFIED +CREATING +ACTIVE +DELETING`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. A universally unique identifier (in UUID4 format) for the 'Application'.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Update time.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApphubApplicationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandApphubApplicationDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandApphubApplicationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + attributesProp, err := expandApphubApplicationAttributes(d.Get("attributes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attributes"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributesProp)) && (ok || !reflect.DeepEqual(v, attributesProp)) { + obj["attributes"] = attributesProp + } + 
scopeProp, err := expandApphubApplicationScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(scopeProp)) && (ok || !reflect.DeepEqual(v, scopeProp)) { + obj["scope"] = scopeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApphubBasePath}}projects/{{project}}/locations/{{location}}/applications?applicationId={{application_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Application: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Application: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Application: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApphubOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Application", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Application: %s", err) + } + + if err := d.Set("name", flattenApphubApplicationName(opRes["name"], d, config)); err != nil { + 
return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Application %q: %#v", d.Id(), res) + + return resourceApphubApplicationRead(d, meta) +} + +func resourceApphubApplicationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApphubBasePath}}projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Application: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApphubApplication %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + + if err := d.Set("name", flattenApphubApplicationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("display_name", flattenApphubApplicationDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("description", 
flattenApphubApplicationDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("attributes", flattenApphubApplicationAttributes(res["attributes"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("create_time", flattenApphubApplicationCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("update_time", flattenApphubApplicationUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("scope", flattenApphubApplicationScope(res["scope"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("uid", flattenApphubApplicationUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + if err := d.Set("state", flattenApphubApplicationState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Application: %s", err) + } + + return nil +} + +func resourceApphubApplicationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Application: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandApphubApplicationDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := 
expandApphubApplicationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + attributesProp, err := expandApphubApplicationAttributes(d.Get("attributes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attributes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributesProp)) { + obj["attributes"] = attributesProp + } + scopeProp, err := expandApphubApplicationScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scopeProp)) { + obj["scope"] = scopeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApphubBasePath}}projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Application %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("attributes") { + updateMask = append(updateMask, "attributes") + } + + if d.HasChange("scope") { + updateMask = append(updateMask, "scope") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if 
len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Application %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Application %q: %#v", d.Id(), res) + } + + err = ApphubOperationWaitTime( + config, res, project, "Updating Application", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceApphubApplicationRead(d, meta) +} + +func resourceApphubApplicationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Application: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ApphubBasePath}}projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Application %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Application") + } + + err = ApphubOperationWaitTime( + config, res, project, "Deleting Application", userAgent, + 
d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Application %q: %#v", d.Id(), res) + return nil +} + +func resourceApphubApplicationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/applications/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApphubApplicationName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["criticality"] = + flattenApphubApplicationAttributesCriticality(original["criticality"], d, config) + transformed["environment"] = + flattenApphubApplicationAttributesEnvironment(original["environment"], d, config) + transformed["developer_owners"] = + flattenApphubApplicationAttributesDeveloperOwners(original["developerOwners"], d, config) + transformed["operator_owners"] = + 
flattenApphubApplicationAttributesOperatorOwners(original["operatorOwners"], d, config) + transformed["business_owners"] = + flattenApphubApplicationAttributesBusinessOwners(original["businessOwners"], d, config) + return []interface{}{transformed} +} +func flattenApphubApplicationAttributesCriticality(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenApphubApplicationAttributesCriticalityType(original["type"], d, config) + return []interface{}{transformed} +} +func flattenApphubApplicationAttributesCriticalityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenApphubApplicationAttributesEnvironmentType(original["type"], d, config) + return []interface{}{transformed} +} +func flattenApphubApplicationAttributesEnvironmentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesDeveloperOwners(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "display_name": 
flattenApphubApplicationAttributesDeveloperOwnersDisplayName(original["displayName"], d, config), + "email": flattenApphubApplicationAttributesDeveloperOwnersEmail(original["email"], d, config), + }) + } + return transformed +} +func flattenApphubApplicationAttributesDeveloperOwnersDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesDeveloperOwnersEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesOperatorOwners(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "display_name": flattenApphubApplicationAttributesOperatorOwnersDisplayName(original["displayName"], d, config), + "email": flattenApphubApplicationAttributesOperatorOwnersEmail(original["email"], d, config), + }) + } + return transformed +} +func flattenApphubApplicationAttributesOperatorOwnersDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesOperatorOwnersEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesBusinessOwners(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + 
continue + } + transformed = append(transformed, map[string]interface{}{ + "display_name": flattenApphubApplicationAttributesBusinessOwnersDisplayName(original["displayName"], d, config), + "email": flattenApphubApplicationAttributesBusinessOwnersEmail(original["email"], d, config), + }) + } + return transformed +} +func flattenApphubApplicationAttributesBusinessOwnersDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationAttributesBusinessOwnersEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenApphubApplicationScopeType(original["type"], d, config) + return []interface{}{transformed} +} +func flattenApphubApplicationScopeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApphubApplicationState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApphubApplicationDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationDescription(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCriticality, err := expandApphubApplicationAttributesCriticality(original["criticality"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCriticality); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["criticality"] = transformedCriticality + } + + transformedEnvironment, err := expandApphubApplicationAttributesEnvironment(original["environment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnvironment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["environment"] = transformedEnvironment + } + + transformedDeveloperOwners, err := expandApphubApplicationAttributesDeveloperOwners(original["developer_owners"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDeveloperOwners); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["developerOwners"] = transformedDeveloperOwners + } + + transformedOperatorOwners, err := expandApphubApplicationAttributesOperatorOwners(original["operator_owners"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperatorOwners); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operatorOwners"] = transformedOperatorOwners + } + + transformedBusinessOwners, err := expandApphubApplicationAttributesBusinessOwners(original["business_owners"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBusinessOwners); val.IsValid() && !tpgresource.IsEmptyValue(val) 
{ + transformed["businessOwners"] = transformedBusinessOwners + } + + return transformed, nil +} + +func expandApphubApplicationAttributesCriticality(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandApphubApplicationAttributesCriticalityType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + return transformed, nil +} + +func expandApphubApplicationAttributesCriticalityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributesEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandApphubApplicationAttributesEnvironmentType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + return transformed, nil +} + +func expandApphubApplicationAttributesEnvironmentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributesDeveloperOwners(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 
0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisplayName, err := expandApphubApplicationAttributesDeveloperOwnersDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedEmail, err := expandApphubApplicationAttributesDeveloperOwnersEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApphubApplicationAttributesDeveloperOwnersDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributesDeveloperOwnersEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributesOperatorOwners(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisplayName, err := expandApphubApplicationAttributesOperatorOwnersDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedEmail, err := 
expandApphubApplicationAttributesOperatorOwnersEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApphubApplicationAttributesOperatorOwnersDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributesOperatorOwnersEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationAttributesBusinessOwners(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisplayName, err := expandApphubApplicationAttributesBusinessOwnersDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedEmail, err := expandApphubApplicationAttributesBusinessOwnersEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApphubApplicationAttributesBusinessOwnersDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandApphubApplicationAttributesBusinessOwnersEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApphubApplicationScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandApphubApplicationScopeType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + return transformed, nil +} + +func expandApphubApplicationScopeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google/services/apphub/resource_apphub_application_generated_test.go b/google/services/apphub/resource_apphub_application_generated_test.go new file mode 100644 index 00000000000..bf90441dc41 --- /dev/null +++ b/google/services/apphub/resource_apphub_application_generated_test.go @@ -0,0 +1,166 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apphub_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccApphubApplication_applicationBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApphubApplicationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApphubApplication_applicationBasicExample(context), + }, + { + ResourceName: "google_apphub_application.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + }, + }) +} + +func testAccApphubApplication_applicationBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apphub_application" "example" { + location = "us-east1" + application_id = "tf-test-example-application%{random_suffix}" + scope { + type = "REGIONAL" + } +} +`, context) +} + +func TestAccApphubApplication_applicationFullExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApphubApplicationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccApphubApplication_applicationFullExample(context), + }, + { + ResourceName: "google_apphub_application.example2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + }, + }) +} + +func testAccApphubApplication_applicationFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apphub_application" "example2" { + location = "us-east1" + application_id = "tf-test-example-application%{random_suffix}" + display_name = "Application Full%{random_suffix}" + scope { + type = "REGIONAL" + } + description = "Application for testing%{random_suffix}" + attributes { + environment { + type = "STAGING" + } + criticality { + type = "MISSION_CRITICAL" + } + business_owners { + display_name = "Alice%{random_suffix}" + email = "alice@google.com%{random_suffix}" + } + developer_owners { + display_name = "Bob%{random_suffix}" + email = "bob@google.com%{random_suffix}" + } + operator_owners { + display_name = "Charlie%{random_suffix}" + email = "charlie@google.com%{random_suffix}" + } + } +} +`, context) +} + +func testAccCheckApphubApplicationDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_apphub_application" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{ApphubBasePath}}projects/{{project}}/locations/{{location}}/applications/{{application_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return 
fmt.Errorf("ApphubApplication still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/services/apphub/resource_apphub_application_sweeper.go b/google/services/apphub/resource_apphub_application_sweeper.go new file mode 100644 index 00000000000..3281d31b8b8 --- /dev/null +++ b/google/services/apphub/resource_apphub_application_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apphub + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApphubApplication", testSweepApphubApplication) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApphubApplication(region string) error { + resourceName := "ApphubApplication" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + 
+ t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apphub.googleapis.com/v1/projects/{{project}}/locations/{{location}}/applications", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["applications"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apphub.googleapis.com/v1/projects/{{project}}/locations/{{location}}/applications/{{application_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/apphub/resource_apphub_application_test.go b/google/services/apphub/resource_apphub_application_test.go new file mode 100644 index 00000000000..f4de19d6ebd --- /dev/null +++ b/google/services/apphub/resource_apphub_application_test.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package apphub_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccApphubApplication_applicationUpdateFull(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApphubApplicationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApphubApplication_applicationFullExample(context), + }, + { + ResourceName: "google_apphub_application.example2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + { + Config: testAccApphubApplication_applicationUpdateDisplayName(context), + }, + { + ResourceName: "google_apphub_application.example2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + { + Config: testAccApphubApplication_applicationUpdateEnvironment(context), + }, + { + ResourceName: "google_apphub_application.example2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + { + Config: testAccApphubApplication_applicationUpdateCriticality(context), + }, + { + ResourceName: "google_apphub_application.example2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + { + Config: testAccApphubApplication_applicationUpdateOwners(context), + }, + { + ResourceName: "google_apphub_application.example2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "application_id"}, + }, + }, + }) +} + +func 
testAccApphubApplication_applicationUpdateDisplayName(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_apphub_application" "example2" { + location = "us-east1" + application_id = "tf-test-example-application%{random_suffix}" + display_name = "Application Full New%{random_suffix}" + scope { + type = "REGIONAL" + } + attributes { + environment { + type = "STAGING" + } + criticality { + type = "MISSION_CRITICAL" + } + business_owners { + display_name = "Alice%{random_suffix}" + email = "alice@google.com%{random_suffix}" + } + developer_owners { + display_name = "Bob%{random_suffix}" + email = "bob@google.com%{random_suffix}" + } + operator_owners { + display_name = "Charlie%{random_suffix}" + email = "charlie@google.com%{random_suffix}" + } + } +} +`, context) +} + +func testAccApphubApplication_applicationUpdateEnvironment(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_apphub_application" "example2" { + location = "us-east1" + application_id = "tf-test-example-application%{random_suffix}" + display_name = "Application Full New%{random_suffix}" + scope { + type = "REGIONAL" + } + attributes { + environment { + type = "TEST" + } + criticality { + type = "MISSION_CRITICAL" + } + business_owners { + display_name = "Alice%{random_suffix}" + email = "alice@google.com%{random_suffix}" + } + developer_owners { + display_name = "Bob%{random_suffix}" + email = "bob@google.com%{random_suffix}" + } + operator_owners { + display_name = "Charlie%{random_suffix}" + email = "charlie@google.com%{random_suffix}" + } + } +} +`, context) +} + +func testAccApphubApplication_applicationUpdateCriticality(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_apphub_application" "example2" { + location = "us-east1" + application_id = "tf-test-example-application%{random_suffix}" + display_name = "Application Full New%{random_suffix}" + scope { + type = "REGIONAL" + } + attributes 
{ + environment { + type = "TEST" + } + criticality { + type = "MEDIUM" + } + business_owners { + display_name = "Alice%{random_suffix}" + email = "alice@google.com%{random_suffix}" + } + developer_owners { + display_name = "Bob%{random_suffix}" + email = "bob@google.com%{random_suffix}" + } + operator_owners { + display_name = "Charlie%{random_suffix}" + email = "charlie@google.com%{random_suffix}" + } + } +} +`, context) +} + +func testAccApphubApplication_applicationUpdateOwners(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_apphub_application" "example2" { + location = "us-east1" + application_id = "tf-test-example-application%{random_suffix}" + display_name = "Application Full New%{random_suffix}" + scope { + type = "REGIONAL" + } + attributes { + environment { + type = "TEST" + } + criticality { + type = "MEDIUM" + } + business_owners { + display_name = "Alice%{random_suffix}" + email = "alice@google.com%{random_suffix}" + } + developer_owners { + display_name = "Bob%{random_suffix}" + email = "bob@google.com%{random_suffix}" + } + developer_owners { + display_name = "Derek%{random_suffix}" + email = "derek@google.com%{random_suffix}" + } + operator_owners { + display_name = "Charlie%{random_suffix}" + email = "charlie@google.com%{random_suffix}" + } + } +} +`, context) +} diff --git a/google/services/artifactregistry/resource_artifact_registry_repository.go b/google/services/artifactregistry/resource_artifact_registry_repository.go index ac982306a48..f1a56029b23 100644 --- a/google/services/artifactregistry/resource_artifact_registry_repository.go +++ b/google/services/artifactregistry/resource_artifact_registry_repository.go @@ -1079,9 +1079,6 @@ func flattenArtifactRegistryRepositoryDockerConfig(v interface{}, d *schema.Reso return nil } original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } transformed := make(map[string]interface{}) transformed["immutable_tags"] = 
flattenArtifactRegistryRepositoryDockerConfigImmutableTags(original["immutableTags"], d, config) @@ -1525,9 +1522,14 @@ func expandArtifactRegistryRepositoryKmsKeyName(v interface{}, d tpgresource.Ter func expandArtifactRegistryRepositoryDockerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { + if len(l) == 0 { return nil, nil } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } raw := l[0] original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/google/services/bigquery/resource_bigquery_dataset.go b/google/services/bigquery/resource_bigquery_dataset.go index e5b91fdffea..889b3d27b2b 100644 --- a/google/services/bigquery/resource_bigquery_dataset.go +++ b/google/services/bigquery/resource_bigquery_dataset.go @@ -33,7 +33,7 @@ import ( "google.golang.org/api/googleapi" ) -const datasetIdRegexp = `[0-9A-Za-z_]+` +const datasetIdRegexp = `^[0-9A-Za-z_]+$` func validateDatasetId(v interface{}, k string) (ws []string, errors []error) { value := v.(string) diff --git a/google/services/bigquery/resource_bigquery_dataset_test.go b/google/services/bigquery/resource_bigquery_dataset_test.go index 5d061744241..d1b4071550d 100644 --- a/google/services/bigquery/resource_bigquery_dataset_test.go +++ b/google/services/bigquery/resource_bigquery_dataset_test.go @@ -4,6 +4,8 @@ package bigquery_test import ( "fmt" + "regexp" + "strings" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -328,6 +330,47 @@ func TestAccBigQueryDataset_storageBillModel(t *testing.T) { }) } +func TestAccBigQueryDataset_invalidCharacterInID(t *testing.T) { + t.Parallel() + // Not an acceptance test. 
+ acctest.SkipIfVcr(t) + + datasetID := fmt.Sprintf("tf_test_%s-with-hyphens", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + ExpectError: regexp.MustCompile("must contain only letters.+numbers.+or underscores.+"), + }, + }, + }) +} + +func TestAccBigQueryDataset_invalidLongID(t *testing.T) { + t.Parallel() + // Not an acceptance test. + acctest.SkipIfVcr(t) + + datasetSuffix := acctest.RandString(t, 10) + datasetID := fmt.Sprintf("tf_test_%s", strings.Repeat(datasetSuffix, 200)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + ExpectError: regexp.MustCompile(".+cannot be greater than 1,024 characters"), + }, + }, + }) +} + func testAccAddTable(t *testing.T, datasetID string, tableID string) resource.TestCheckFunc { // Not actually a check, but adds a table independently of terraform return func(s *terraform.State) error { diff --git a/google/services/bigquery/resource_bigquery_routine.go b/google/services/bigquery/resource_bigquery_routine.go index f8b14696cd7..95e9b480b7b 100644 --- a/google/services/bigquery/resource_bigquery_routine.go +++ b/google/services/bigquery/resource_bigquery_routine.go @@ -150,6 +150,47 @@ imported JAVASCRIPT libraries.`, ValidateFunc: verify.ValidateEnum([]string{"SQL", "JAVASCRIPT", "PYTHON", "JAVA", "SCALA", ""}), Description: `The language of the routine. 
Possible values: ["SQL", "JAVASCRIPT", "PYTHON", "JAVA", "SCALA"]`, }, + "remote_function_options": { + Type: schema.TypeList, + Optional: true, + Description: `Remote function specific options.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "connection": { + Type: schema.TypeString, + Optional: true, + Description: `Fully qualified name of the user-provided connection object which holds +the authentication information to send requests to the remote service. +Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"`, + }, + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: `Endpoint of the user-provided remote service, e.g. +'https://us-east1-my_gcf_project.cloudfunctions.net/remote_add'`, + }, + "max_batching_rows": { + Type: schema.TypeString, + Optional: true, + Description: `Max number of rows in each batch sent to the remote service. If absent or if 0, +BigQuery dynamically decides the number of rows in a batch.`, + }, + "user_defined_context": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `User-defined context as a set of key/value pairs, which will be sent as function +invocation context together with batched arguments in the requests to the remote +service. The total number of bytes of keys and values must be less than 8KB. + +An object containing a list of "key": value pairs. 
Example: +'{ "name": "wrench", "mass": "1.3kg", "count": "3" }'.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, "return_table_type": { Type: schema.TypeString, Optional: true, @@ -357,6 +398,12 @@ func resourceBigQueryRoutineCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("spark_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkOptionsProp)) && (ok || !reflect.DeepEqual(v, sparkOptionsProp)) { obj["sparkOptions"] = sparkOptionsProp } + remoteFunctionOptionsProp, err := expandBigQueryRoutineRemoteFunctionOptions(d.Get("remote_function_options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("remote_function_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(remoteFunctionOptionsProp)) && (ok || !reflect.DeepEqual(v, remoteFunctionOptionsProp)) { + obj["remoteFunctionOptions"] = remoteFunctionOptionsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines") if err != nil { @@ -493,6 +540,9 @@ func resourceBigQueryRoutineRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("spark_options", flattenBigQueryRoutineSparkOptions(res["sparkOptions"], d, config)); err != nil { return fmt.Errorf("Error reading Routine: %s", err) } + if err := d.Set("remote_function_options", flattenBigQueryRoutineRemoteFunctionOptions(res["remoteFunctionOptions"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } return nil } @@ -579,6 +629,12 @@ func resourceBigQueryRoutineUpdate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("spark_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sparkOptionsProp)) { obj["sparkOptions"] = sparkOptionsProp } + remoteFunctionOptionsProp, err := expandBigQueryRoutineRemoteFunctionOptions(d.Get("remote_function_options"), d, config) + if err != nil { + return err + } 
else if v, ok := d.GetOkExists("remote_function_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, remoteFunctionOptionsProp)) { + obj["remoteFunctionOptions"] = remoteFunctionOptionsProp + } url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") if err != nil { @@ -897,6 +953,41 @@ func flattenBigQueryRoutineSparkOptionsMainClass(v interface{}, d *schema.Resour return v } +func flattenBigQueryRoutineRemoteFunctionOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["endpoint"] = + flattenBigQueryRoutineRemoteFunctionOptionsEndpoint(original["endpoint"], d, config) + transformed["connection"] = + flattenBigQueryRoutineRemoteFunctionOptionsConnection(original["connection"], d, config) + transformed["user_defined_context"] = + flattenBigQueryRoutineRemoteFunctionOptionsUserDefinedContext(original["userDefinedContext"], d, config) + transformed["max_batching_rows"] = + flattenBigQueryRoutineRemoteFunctionOptionsMaxBatchingRows(original["maxBatchingRows"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryRoutineRemoteFunctionOptionsEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineRemoteFunctionOptionsConnection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineRemoteFunctionOptionsUserDefinedContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineRemoteFunctionOptionsMaxBatchingRows(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ func expandBigQueryRoutineRoutineReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) @@ -1151,3 +1242,66 @@ func expandBigQueryRoutineSparkOptionsArchiveUris(v interface{}, d tpgresource.T func expandBigQueryRoutineSparkOptionsMainClass(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandBigQueryRoutineRemoteFunctionOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEndpoint, err := expandBigQueryRoutineRemoteFunctionOptionsEndpoint(original["endpoint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endpoint"] = transformedEndpoint + } + + transformedConnection, err := expandBigQueryRoutineRemoteFunctionOptionsConnection(original["connection"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnection); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["connection"] = transformedConnection + } + + transformedUserDefinedContext, err := expandBigQueryRoutineRemoteFunctionOptionsUserDefinedContext(original["user_defined_context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUserDefinedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["userDefinedContext"] = transformedUserDefinedContext + } + + transformedMaxBatchingRows, err := expandBigQueryRoutineRemoteFunctionOptionsMaxBatchingRows(original["max_batching_rows"], d, config) + if err != nil { + return nil, err + } else 
if val := reflect.ValueOf(transformedMaxBatchingRows); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxBatchingRows"] = transformedMaxBatchingRows + } + + return transformed, nil +} + +func expandBigQueryRoutineRemoteFunctionOptionsEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineRemoteFunctionOptionsConnection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineRemoteFunctionOptionsUserDefinedContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandBigQueryRoutineRemoteFunctionOptionsMaxBatchingRows(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google/services/bigquery/resource_bigquery_routine_generated_test.go b/google/services/bigquery/resource_bigquery_routine_generated_test.go index 68f07d7548f..948e43accd2 100644 --- a/google/services/bigquery/resource_bigquery_routine_generated_test.go +++ b/google/services/bigquery/resource_bigquery_routine_generated_test.go @@ -30,7 +30,7 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func TestAccBigQueryRoutine_bigQueryRoutineBasicExample(t *testing.T) { +func TestAccBigQueryRoutine_bigqueryRoutineBasicExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -43,7 +43,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineBasicExample(t *testing.T) { CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), Steps: []resource.TestStep{ { - Config: 
testAccBigQueryRoutine_bigQueryRoutineBasicExample(context), + Config: testAccBigQueryRoutine_bigqueryRoutineBasicExample(context), }, { ResourceName: "google_bigquery_routine.sproc", @@ -54,7 +54,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineBasicExample(t *testing.T) { }) } -func testAccBigQueryRoutine_bigQueryRoutineBasicExample(context map[string]interface{}) string { +func testAccBigQueryRoutine_bigqueryRoutineBasicExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_bigquery_dataset" "test" { dataset_id = "tf_test_dataset_id%{random_suffix}" @@ -70,7 +70,7 @@ resource "google_bigquery_routine" "sproc" { `, context) } -func TestAccBigQueryRoutine_bigQueryRoutineJsonExample(t *testing.T) { +func TestAccBigQueryRoutine_bigqueryRoutineJsonExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -83,7 +83,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineJsonExample(t *testing.T) { CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigQueryRoutine_bigQueryRoutineJsonExample(context), + Config: testAccBigQueryRoutine_bigqueryRoutineJsonExample(context), }, { ResourceName: "google_bigquery_routine.sproc", @@ -94,7 +94,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineJsonExample(t *testing.T) { }) } -func testAccBigQueryRoutine_bigQueryRoutineJsonExample(context map[string]interface{}) string { +func testAccBigQueryRoutine_bigqueryRoutineJsonExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_bigquery_dataset" "test" { dataset_id = "tf_test_dataset_id%{random_suffix}" @@ -120,7 +120,7 @@ resource "google_bigquery_routine" "sproc" { `, context) } -func TestAccBigQueryRoutine_bigQueryRoutineTvfExample(t *testing.T) { +func TestAccBigQueryRoutine_bigqueryRoutineTvfExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -133,7 +133,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineTvfExample(t *testing.T) 
{ CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigQueryRoutine_bigQueryRoutineTvfExample(context), + Config: testAccBigQueryRoutine_bigqueryRoutineTvfExample(context), }, { ResourceName: "google_bigquery_routine.sproc", @@ -144,7 +144,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineTvfExample(t *testing.T) { }) } -func testAccBigQueryRoutine_bigQueryRoutineTvfExample(context map[string]interface{}) string { +func testAccBigQueryRoutine_bigqueryRoutineTvfExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_bigquery_dataset" "test" { dataset_id = "tf_test_dataset_id%{random_suffix}" @@ -170,7 +170,7 @@ resource "google_bigquery_routine" "sproc" { `, context) } -func TestAccBigQueryRoutine_bigQueryRoutinePysparkExample(t *testing.T) { +func TestAccBigQueryRoutine_bigqueryRoutinePysparkExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -183,7 +183,7 @@ func TestAccBigQueryRoutine_bigQueryRoutinePysparkExample(t *testing.T) { CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigQueryRoutine_bigQueryRoutinePysparkExample(context), + Config: testAccBigQueryRoutine_bigqueryRoutinePysparkExample(context), }, { ResourceName: "google_bigquery_routine.pyspark", @@ -194,7 +194,7 @@ func TestAccBigQueryRoutine_bigQueryRoutinePysparkExample(t *testing.T) { }) } -func testAccBigQueryRoutine_bigQueryRoutinePysparkExample(context map[string]interface{}) string { +func testAccBigQueryRoutine_bigqueryRoutinePysparkExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_bigquery_dataset" "test" { dataset_id = "tf_test_dataset_id%{random_suffix}" @@ -240,7 +240,7 @@ resource "google_bigquery_routine" "pyspark" { `, context) } -func TestAccBigQueryRoutine_bigQueryRoutinePysparkMainfileExample(t *testing.T) { +func 
TestAccBigQueryRoutine_bigqueryRoutinePysparkMainfileExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -253,7 +253,7 @@ func TestAccBigQueryRoutine_bigQueryRoutinePysparkMainfileExample(t *testing.T) CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigQueryRoutine_bigQueryRoutinePysparkMainfileExample(context), + Config: testAccBigQueryRoutine_bigqueryRoutinePysparkMainfileExample(context), }, { ResourceName: "google_bigquery_routine.pyspark_mainfile", @@ -264,7 +264,7 @@ func TestAccBigQueryRoutine_bigQueryRoutinePysparkMainfileExample(t *testing.T) }) } -func testAccBigQueryRoutine_bigQueryRoutinePysparkMainfileExample(context map[string]interface{}) string { +func testAccBigQueryRoutine_bigqueryRoutinePysparkMainfileExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_bigquery_dataset" "test" { dataset_id = "tf_test_dataset_id%{random_suffix}" @@ -294,7 +294,7 @@ resource "google_bigquery_routine" "pyspark_mainfile" { `, context) } -func TestAccBigQueryRoutine_bigQueryRoutineSparkJarExample(t *testing.T) { +func TestAccBigQueryRoutine_bigqueryRoutineSparkJarExample(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -307,7 +307,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineSparkJarExample(t *testing.T) { CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccBigQueryRoutine_bigQueryRoutineSparkJarExample(context), + Config: testAccBigQueryRoutine_bigqueryRoutineSparkJarExample(context), }, { ResourceName: "google_bigquery_routine.spark_jar", @@ -318,7 +318,7 @@ func TestAccBigQueryRoutine_bigQueryRoutineSparkJarExample(t *testing.T) { }) } -func testAccBigQueryRoutine_bigQueryRoutineSparkJarExample(context map[string]interface{}) string { +func testAccBigQueryRoutine_bigqueryRoutineSparkJarExample(context map[string]interface{}) string { return acctest.Nprintf(` resource 
"google_bigquery_dataset" "test" { dataset_id = "tf_test_dataset_id%{random_suffix}" diff --git a/google/services/bigquery/resource_bigquery_routine_test.go b/google/services/bigquery/resource_bigquery_routine_test.go index f4f3a545cb1..42fc494701d 100644 --- a/google/services/bigquery/resource_bigquery_routine_test.go +++ b/google/services/bigquery/resource_bigquery_routine_test.go @@ -175,3 +175,171 @@ resource "google_bigquery_routine" "spark_jar" { } `, context) } + +func TestAccBigQueryRoutine_bigQueryRoutineRemoteFunction(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "zip_path": "./test-fixtures/function-source.zip", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryRoutineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryRoutine_bigQueryRoutineRemoteFunction(context), + }, + { + ResourceName: "google_bigquery_routine.remote_function_routine", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBigQueryRoutine_bigQueryRoutineRemoteFunction_Update(context), + }, + { + ResourceName: "google_bigquery_routine.remote_function_routine", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccBigQueryRoutine_bigQueryRoutineRemoteFunction(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "default" { + name = "%{random_suffix}-gcf-source" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.default.name + source = "%{zip_path}" +} + +resource "google_cloudfunctions2_function" "default" { + name = "function-v2-0" + location = "us-central1" + description = "a new function" + + build_config { + runtime = 
"nodejs18" + entry_point = "helloHttp" + source { + storage_source { + bucket = google_storage_bucket.default.name + object = google_storage_bucket_object.object.name + } + } + } + + service_config { + max_instance_count = 1 + available_memory = "256M" + timeout_seconds = 60 + } +} + +resource "google_bigquery_connection" "test" { + connection_id = "tf_test_connection_id%{random_suffix}" + location = "US" + cloud_resource { } +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "tf_test_dataset_id%{random_suffix}" +} + +resource "google_bigquery_routine" "remote_function_routine" { + dataset_id = "${google_bigquery_dataset.test.dataset_id}" + routine_id = "tf_test_routine_id%{random_suffix}" + routine_type = "SCALAR_FUNCTION" + definition_body = "" + + return_type = "{\"typeKind\" : \"STRING\"}" + + remote_function_options { + endpoint = google_cloudfunctions2_function.default.service_config[0].uri + connection = "${google_bigquery_connection.test.name}" + max_batching_rows = "10" + user_defined_context = { + "z": "1.5", + } + } +} +`, context) +} + +func testAccBigQueryRoutine_bigQueryRoutineRemoteFunction_Update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "default" { + name = "%{random_suffix}-gcf-source" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.default.name + source = "%{zip_path}" +} + +resource "google_cloudfunctions2_function" "default2" { + name = "function-v2-1" + location = "us-central1" + description = "a new new function" + + build_config { + runtime = "nodejs18" + entry_point = "helloHttp" + source { + storage_source { + bucket = google_storage_bucket.default.name + object = google_storage_bucket_object.object.name + } + } + } + + service_config { + max_instance_count = 1 + available_memory = "256M" + timeout_seconds = 60 + } +} + +resource 
"google_bigquery_connection" "test2" { + connection_id = "tf_test_connection2_id%{random_suffix}" + location = "US" + cloud_resource { } +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "tf_test_dataset_id%{random_suffix}" +} + +resource "google_bigquery_routine" "remote_function_routine" { + dataset_id = "${google_bigquery_dataset.test.dataset_id}" + routine_id = "tf_test_routine_id%{random_suffix}" + routine_type = "SCALAR_FUNCTION" + definition_body = "" + + return_type = "{\"typeKind\" : \"STRING\"}" + + remote_function_options { + endpoint = google_cloudfunctions2_function.default2.service_config[0].uri + connection = "${google_bigquery_connection.test2.name}" + max_batching_rows = "5" + user_defined_context = { + "z": "1.2", + "w": "test", + } + } +} +`, context) +} diff --git a/google/services/bigquery/test-fixtures/function-source.zip b/google/services/bigquery/test-fixtures/function-source.zip new file mode 100644 index 00000000000..1cb571888ef Binary files /dev/null and b/google/services/bigquery/test-fixtures/function-source.zip differ diff --git a/google/services/certificatemanager/resource_certificate_manager_certificate_generated_test.go b/google/services/certificatemanager/resource_certificate_manager_certificate_generated_test.go index 78eb17d1fb6..c9187fecbe5 100644 --- a/google/services/certificatemanager/resource_certificate_manager_certificate_generated_test.go +++ b/google/services/certificatemanager/resource_certificate_manager_certificate_generated_test.go @@ -433,6 +433,55 @@ resource "google_certificate_manager_dns_authorization" "instance2" { `, context) } +func TestAccCertificateManagerCertificate_certificateManagerGoogleManagedRegionalCertificateDnsAuthExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCertificateManagerCertificateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCertificateManagerCertificate_certificateManagerGoogleManagedRegionalCertificateDnsAuthExample(context), + }, + { + ResourceName: "google_certificate_manager_certificate.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"self_managed", "name", "location", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccCertificateManagerCertificate_certificateManagerGoogleManagedRegionalCertificateDnsAuthExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "tf-test-dns-cert%{random_suffix}" + description = "regional managed certs" + location = "us-central1" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + ] + } +} +resource "google_certificate_manager_dns_authorization" "instance" { + name = "tf-test-dns-auth%{random_suffix}" + location = "us-central1" + description = "The default dnss" + domain = "subdomain%{random_suffix}.hashicorptest.com" +} +`, context) +} + func testAccCheckCertificateManagerCertificateDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go b/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go index a18978d36a0..09b489fe9e2 100644 --- a/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go +++ b/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go @@ -18,6 +18,7 @@ package certificatemanager import ( + "context" "fmt" "log" 
"reflect" @@ -29,6 +30,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceCertificateManagerDnsAuthorization() *schema.Resource { @@ -48,6 +50,15 @@ func ResourceCertificateManagerDnsAuthorization() *schema.Resource { Delete: schema.DefaultTimeout(20 * time.Minute), }, + SchemaVersion: 1, + + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceCertificateManagerDnsAuthorizationResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceCertificateManagerDnsAuthorizationUpgradeV0, + Version: 0, + }, + }, CustomizeDiff: customdiff.All( tpgresource.SetLabelsDiff, tpgresource.DefaultProviderProject, @@ -84,6 +95,28 @@ and all following characters must be a dash, underscore, letter or digit.`, Please refer to the field 'effective_labels' for all of the labels present on the resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Certificate Manager location. If not specified, "global" is used.`, + Default: "global", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"FIXED_RECORD", "PER_PROJECT_RECORD", ""}), + Description: `type of DNS authorization. If unset during the resource creation, FIXED_RECORD will +be used for global resources, and PER_PROJECT_RECORD will be used for other locations. + +FIXED_RECORD DNS authorization uses DNS-01 validation method + +PER_PROJECT_RECORD DNS authorization allows for independent management +of Google-managed certificates with DNS authorization across multiple +projects. 
Possible values: ["FIXED_RECORD", "PER_PROJECT_RECORD"]`, + }, "dns_resource_record": { Type: schema.TypeList, Computed: true, @@ -155,6 +188,12 @@ func resourceCertificateManagerDnsAuthorizationCreate(d *schema.ResourceData, me } else if v, ok := d.GetOkExists("domain"); !tpgresource.IsEmptyValue(reflect.ValueOf(domainProp)) && (ok || !reflect.DeepEqual(v, domainProp)) { obj["domain"] = domainProp } + typeProp, err := expandCertificateManagerDnsAuthorizationType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } labelsProp, err := expandCertificateManagerDnsAuthorizationEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -162,7 +201,7 @@ func resourceCertificateManagerDnsAuthorizationCreate(d *schema.ResourceData, me obj["labels"] = labelsProp } - url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations?dnsAuthorizationId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/dnsAuthorizations?dnsAuthorizationId={{name}}") if err != nil { return err } @@ -195,7 +234,7 @@ func resourceCertificateManagerDnsAuthorizationCreate(d *schema.ResourceData, me } // Store the ID now - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -223,7 +262,7 @@ func resourceCertificateManagerDnsAuthorizationRead(d *schema.ResourceData, meta return err } - url, err := tpgresource.ReplaceVars(d, config, 
"{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}") if err != nil { return err } @@ -265,6 +304,9 @@ func resourceCertificateManagerDnsAuthorizationRead(d *schema.ResourceData, meta if err := d.Set("domain", flattenCertificateManagerDnsAuthorizationDomain(res["domain"], d, config)); err != nil { return fmt.Errorf("Error reading DnsAuthorization: %s", err) } + if err := d.Set("type", flattenCertificateManagerDnsAuthorizationType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading DnsAuthorization: %s", err) + } if err := d.Set("dns_resource_record", flattenCertificateManagerDnsAuthorizationDnsResourceRecord(res["dnsResourceRecord"], d, config)); err != nil { return fmt.Errorf("Error reading DnsAuthorization: %s", err) } @@ -307,7 +349,7 @@ func resourceCertificateManagerDnsAuthorizationUpdate(d *schema.ResourceData, me obj["labels"] = labelsProp } - url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}") if err != nil { return err } @@ -379,7 +421,7 @@ func resourceCertificateManagerDnsAuthorizationDelete(d *schema.ResourceData, me } billingProject = project - url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}") if err != nil { return err } @@ -420,15 +462,15 @@ func resourceCertificateManagerDnsAuthorizationDelete(d *schema.ResourceData, me func 
resourceCertificateManagerDnsAuthorizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ - "^projects/(?P[^/]+)/locations/global/dnsAuthorizations/(?P[^/]+)$", - "^(?P[^/]+)/(?P[^/]+)$", - "^(?P[^/]+)$", + "^projects/(?P[^/]+)/locations/(?P[^/]+)/dnsAuthorizations/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -460,6 +502,10 @@ func flattenCertificateManagerDnsAuthorizationDomain(v interface{}, d *schema.Re return v } +func flattenCertificateManagerDnsAuthorizationType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenCertificateManagerDnsAuthorizationDnsResourceRecord(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -516,6 +562,10 @@ func expandCertificateManagerDnsAuthorizationDomain(v interface{}, d tpgresource return v, nil } +func expandCertificateManagerDnsAuthorizationType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandCertificateManagerDnsAuthorizationEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil @@ -526,3 +576,95 @@ func expandCertificateManagerDnsAuthorizationEffectiveLabels(v interface{}, d tp } return m, nil } + +func ResourceCertificateManagerDnsAuthorizationUpgradeV0(_ 
context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + // Version 0 didn't support location. Default it to global. + rawState["location"] = "global" + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceCertificateManagerDnsAuthorizationResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A domain which is being authorized. A DnsAuthorization resource covers a +single domain and its wildcard, e.g. authorization for "example.com" can +be used to issue certificates for "example.com" and "*.example.com".`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is created. +The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the DNS Authorization resource. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "dns_resource_record": { + Type: schema.TypeList, + Computed: true, + Description: `The structure describing the DNS Resource Record that needs to be added +to DNS configuration for the authorization to be usable by +certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Computed: true, + Description: `Data of the DNS Resource Record.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Fully qualified name of the DNS Resource Record. +E.g. '_acme-challenge.example.com'.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of the DNS Resource Record.`, + }, + }, + }, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } + +} diff --git a/google/services/certificatemanager/resource_certificate_manager_dns_authorization_generated_test.go b/google/services/certificatemanager/resource_certificate_manager_dns_authorization_generated_test.go index e1b25407cd7..a5e7f6e2c28 100644 --- a/google/services/certificatemanager/resource_certificate_manager_dns_authorization_generated_test.go +++ b/google/services/certificatemanager/resource_certificate_manager_dns_authorization_generated_test.go @@ -49,7 +49,7 @@ func 
TestAccCertificateManagerDnsAuthorization_certificateManagerDnsAuthorizatio ResourceName: "google_certificate_manager_dns_authorization.default", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"name", "location", "labels", "terraform_labels"}, }, }, }) @@ -59,7 +59,8 @@ func testAccCertificateManagerDnsAuthorization_certificateManagerDnsAuthorizatio return acctest.Nprintf(` resource "google_certificate_manager_dns_authorization" "default" { name = "tf-test-dns-auth%{random_suffix}" - description = "The default dnss" + location = "global" + description = "The default dns" domain = "subdomain%{random_suffix}.hashicorptest.com" } @@ -77,6 +78,43 @@ output "record_data_to_insert" { `, context) } +func TestAccCertificateManagerDnsAuthorization_certificateManagerDnsAuthorizationRegionalExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCertificateManagerDnsAuthorizationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCertificateManagerDnsAuthorization_certificateManagerDnsAuthorizationRegionalExample(context), + }, + { + ResourceName: "google_certificate_manager_dns_authorization.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccCertificateManagerDnsAuthorization_certificateManagerDnsAuthorizationRegionalExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_certificate_manager_dns_authorization" "default" { + name = "tf-test-dns-auth%{random_suffix}" + location = "us-central1" + description = "reginal dns" + type = 
"PER_PROJECT_RECORD" + domain = "subdomain%{random_suffix}.hashicorptest.com" +} +`, context) +} + func testAccCheckCertificateManagerDnsAuthorizationDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { @@ -89,7 +127,7 @@ func testAccCheckCertificateManagerDnsAuthorizationDestroyProducer(t *testing.T) config := acctest.GoogleProviderConfig(t) - url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}") if err != nil { return err } diff --git a/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go b/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go index 562519e04eb..6211b791872 100644 --- a/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go +++ b/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go @@ -64,7 +64,7 @@ func testSweepCertificateManagerDnsAuthorization(region string) error { }, } - listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/dnsAuthorizations", "?")[0] + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dnsAuthorizations", "?")[0] listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) @@ -108,7 +108,7 @@ func testSweepCertificateManagerDnsAuthorization(region string) error { continue } - deleteTemplate := 
"https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/dnsAuthorizations/{{name}}" + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}" deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) diff --git a/google/services/certificatemanager/resource_certificate_manager_dns_authorization_upgrade_test.go b/google/services/certificatemanager/resource_certificate_manager_dns_authorization_upgrade_test.go new file mode 100644 index 00000000000..a04bd9089f7 --- /dev/null +++ b/google/services/certificatemanager/resource_certificate_manager_dns_authorization_upgrade_test.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package certificatemanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +// Tests schema version migration by creating a dns authorization with an old version of the provider (5.15.0) +// and then updating it with the current version the provider. +func TestAccCertificateManagerDnsAuthorization_migration(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + oldVersion := map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "5.15.0", // a version that doesn't support location yet. 
+ Source: "registry.terraform.io/hashicorp/google", + }, + } + newVersion := map[string]func() (*schema.Provider, error){ + "mynewprovider": func() (*schema.Provider, error) { return acctest.TestAccProviders["google"], nil }, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccCheckCertificateManagerDnsAuthorizationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: dnsAuthorizationResourceConfig(name), + ExternalProviders: oldVersion, + }, + { + ResourceName: "google_certificate_manager_dns_authorization.default", + ImportState: true, + ImportStateVerifyIgnore: []string{"location"}, + ExternalProviders: oldVersion, + }, + { + Config: dnsAuthorizationResourceConfigUpdated(name), + ProviderFactories: newVersion, + }, + { + ResourceName: "google_certificate_manager_dns_authorization.default", + ImportState: true, + ImportStateVerifyIgnore: []string{"location"}, + ProviderFactories: newVersion, + }, + }, + }) +} + +func dnsAuthorizationResourceConfig(name string) string { + return fmt.Sprintf(` + resource "google_certificate_manager_dns_authorization" "default" { + name = "%s" + description = "The default dns" + domain = "domain.hashicorptest.com" + } + `, name) +} + +func dnsAuthorizationResourceConfigUpdated(name string) string { + return fmt.Sprintf(` + provider "mynewprovider" {} + + resource "google_certificate_manager_dns_authorization" "default" { + provider = mynewprovider + name = "%s" + description = "The migrated default dns" + domain = "domain.hashicorptest.com" + } + `, name) +} diff --git a/google/services/clouddeploy/iam_clouddeploy_custom_target_type.go b/google/services/clouddeploy/iam_clouddeploy_custom_target_type.go new file mode 100644 index 00000000000..ae66bf2047e --- /dev/null +++ b/google/services/clouddeploy/iam_clouddeploy_custom_target_type.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ClouddeployCustomTargetTypeIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ClouddeployCustomTargetTypeIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ClouddeployCustomTargetTypeIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = 
project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/customTargetTypes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ClouddeployCustomTargetTypeIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ClouddeployCustomTargetTypeIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/customTargetTypes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ClouddeployCustomTargetTypeIamUpdater{ + project: 
values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ClouddeployCustomTargetTypeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyCustomTargetTypeUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ClouddeployCustomTargetTypeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyCustomTargetTypeUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ClouddeployCustomTargetTypeIamUpdater) qualifyCustomTargetTypeUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ClouddeployBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s", u.project, u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ClouddeployCustomTargetTypeIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s", u.project, u.location, u.name) +} + +func (u *ClouddeployCustomTargetTypeIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-clouddeploy-customtargettype-%s", u.GetResourceId()) +} + +func (u *ClouddeployCustomTargetTypeIamUpdater) DescribeResource() string { + return fmt.Sprintf("clouddeploy customtargettype %q", u.GetResourceId()) +} diff --git a/google/services/clouddeploy/iam_clouddeploy_custom_target_type_generated_test.go b/google/services/clouddeploy/iam_clouddeploy_custom_target_type_generated_test.go new file mode 100644 index 00000000000..f82ec958d68 --- /dev/null +++ b/google/services/clouddeploy/iam_clouddeploy_custom_target_type_generated_test.go @@ -0,0 +1,291 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package clouddeploy_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccClouddeployCustomTargetTypeIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployCustomTargetTypeIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_clouddeploy_custom_target_type_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-my-custom-target-type%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccClouddeployCustomTargetTypeIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_clouddeploy_custom_target_type_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-my-custom-target-type%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccClouddeployCustomTargetTypeIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccClouddeployCustomTargetTypeIamMember_basicGenerated(context), + }, + { + ResourceName: "google_clouddeploy_custom_target_type_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s roles/viewer user:admin@hashicorptest.com", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-my-custom-target-type%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccClouddeployCustomTargetTypeIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployCustomTargetTypeIamPolicy_basicGenerated(context), + Check: resource.TestCheckResourceAttrSet("data.google_clouddeploy_custom_target_type_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_clouddeploy_custom_target_type_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-my-custom-target-type%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccClouddeployCustomTargetTypeIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_clouddeploy_custom_target_type_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/customTargetTypes/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), 
fmt.Sprintf("tf-test-my-custom-target-type%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccClouddeployCustomTargetTypeIamMember_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = "us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + description = "My custom target type" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} + +resource "google_clouddeploy_custom_target_type_iam_member" "foo" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccClouddeployCustomTargetTypeIamPolicy_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = "us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + description = "My custom target type" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_clouddeploy_custom_target_type_iam_policy" "foo" { + project = 
google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + policy_data = data.google_iam_policy.foo.policy_data +} + +data "google_clouddeploy_custom_target_type_iam_policy" "foo" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + depends_on = [ + google_clouddeploy_custom_target_type_iam_policy.foo + ] +} +`, context) +} + +func testAccClouddeployCustomTargetTypeIamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = "us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + description = "My custom target type" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} + +data "google_iam_policy" "foo" { +} + +resource "google_clouddeploy_custom_target_type_iam_policy" "foo" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccClouddeployCustomTargetTypeIamBinding_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = "us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + 
description = "My custom target type" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} + +resource "google_clouddeploy_custom_target_type_iam_binding" "foo" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccClouddeployCustomTargetTypeIamBinding_updateGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_custom_target_type" "custom-target-type" { + location = "us-central1" + name = "tf-test-my-custom-target-type%{random_suffix}" + description = "My custom target type" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + custom_actions { + render_action = "renderAction" + deploy_action = "deployAction" + } +} + +resource "google_clouddeploy_custom_target_type_iam_binding" "foo" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/google/services/clouddeploy/iam_clouddeploy_target.go b/google/services/clouddeploy/iam_clouddeploy_target.go new file mode 100644 index 00000000000..0361f1bf958 --- /dev/null +++ 
b/google/services/clouddeploy/iam_clouddeploy_target.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ClouddeployTargetIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ClouddeployTargetIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ClouddeployTargetIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, 
fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/targets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ClouddeployTargetIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ClouddeployTargetIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/targets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ClouddeployTargetIamUpdater{ + 
project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ClouddeployTargetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTargetUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ClouddeployTargetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTargetUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + 
Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ClouddeployTargetIamUpdater) qualifyTargetUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ClouddeployBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/targets/%s", u.project, u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ClouddeployTargetIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/targets/%s", u.project, u.location, u.name) +} + +func (u *ClouddeployTargetIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-clouddeploy-target-%s", u.GetResourceId()) +} + +func (u *ClouddeployTargetIamUpdater) DescribeResource() string { + return fmt.Sprintf("clouddeploy target %q", u.GetResourceId()) +} diff --git a/google/services/clouddeploy/iam_clouddeploy_target_generated_test.go b/google/services/clouddeploy/iam_clouddeploy_target_generated_test.go new file mode 100644 index 00000000000..9ef6e2e1ce0 --- /dev/null +++ b/google/services/clouddeploy/iam_clouddeploy_target_generated_test.go @@ -0,0 +1,226 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package clouddeploy_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccClouddeployTargetIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTargetIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_clouddeploy_target_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/targets/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-cd-target%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccClouddeployTargetIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_clouddeploy_target_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/targets/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-cd-target%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccClouddeployTargetIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member 
creation (no update for member, no need to test) + Config: testAccClouddeployTargetIamMember_basicGenerated(context), + }, + { + ResourceName: "google_clouddeploy_target_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/targets/%s roles/viewer user:admin@hashicorptest.com", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-cd-target%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccClouddeployTargetIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTargetIamPolicy_basicGenerated(context), + Check: resource.TestCheckResourceAttrSet("data.google_clouddeploy_target_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_clouddeploy_target_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/targets/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-cd-target%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccClouddeployTargetIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_clouddeploy_target_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/targets/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-cd-target%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccClouddeployTargetIamMember_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "default" { + name = "tf-test-cd-target%{random_suffix}" + 
location = "us-central1" + } + +resource "google_clouddeploy_target_iam_member" "foo" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccClouddeployTargetIamPolicy_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "default" { + name = "tf-test-cd-target%{random_suffix}" + location = "us-central1" + } + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_clouddeploy_target_iam_policy" "foo" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + policy_data = data.google_iam_policy.foo.policy_data +} + +data "google_clouddeploy_target_iam_policy" "foo" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + depends_on = [ + google_clouddeploy_target_iam_policy.foo + ] +} +`, context) +} + +func testAccClouddeployTargetIamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "default" { + name = "tf-test-cd-target%{random_suffix}" + location = "us-central1" + } + +data "google_iam_policy" "foo" { +} + +resource "google_clouddeploy_target_iam_policy" "foo" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccClouddeployTargetIamBinding_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_clouddeploy_target" "default" { + name = "tf-test-cd-target%{random_suffix}" + location = "us-central1" + } + +resource "google_clouddeploy_target_iam_binding" "foo" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccClouddeployTargetIamBinding_updateGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "default" { + name = "tf-test-cd-target%{random_suffix}" + location = "us-central1" + } + +resource "google_clouddeploy_target_iam_binding" "foo" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/google/services/clouddeploy/resource_clouddeploy_automation.go b/google/services/clouddeploy/resource_clouddeploy_automation.go new file mode 100644 index 00000000000..6a990ed78fc --- /dev/null +++ b/google/services/clouddeploy/resource_clouddeploy_automation.go @@ -0,0 +1,1086 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceClouddeployAutomation() *schema.Resource { + return &schema.Resource{ + Create: resourceClouddeployAutomationCreate, + Read: resourceClouddeployAutomationRead, + Update: resourceClouddeployAutomationUpdate, + Delete: resourceClouddeployAutomationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceClouddeployAutomationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetAnnotationsDiff, + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "delivery_pipeline": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The delivery_pipeline for the resource`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the resource`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the 'Automation'.`, + }, + "rules": { + Type: schema.TypeList, + Required: true, + Description: `Required. List of Automation rules associated with the Automation resource. Must have at least one rule and limited to 250 rules per Delivery Pipeline. 
Note: the order of the rules here is not the same as the order of execution.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "advance_rollout_rule": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. The 'AdvanceRolloutRule' will automatically advance a successful Rollout.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `Required. ID of the rule. This id must be unique in the 'Automation' resource to which this rule belongs. The format is 'a-z{0,62}'.`, + }, + "source_phases": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Proceeds only after phase name matched any one in the list. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: '^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "wait": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. How long to wait after a rollout is finished.`, + }, + }, + }, + }, + "promote_release_rule": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. 'PromoteReleaseRule' will automatically promote a release from the current target to a specified target.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `Required. ID of the rule. This id must be unique in the 'Automation' resource to which this rule belongs. The format is 'a-z{0,62}'.`, + }, + "destination_phase": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. The starting phase of the rollout created by this operation. 
Default to the first phase.`, + }, + "destination_target_id": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. The ID of the stage in the pipeline to which this 'Release' is deploying. If unspecified, default it to the next stage in the promotion flow. The value of this field could be one of the following: * The last segment of a target name. It only needs the ID to determine if the target is one of the stages in the promotion sequence defined in the pipeline. * "@next", the next target in the promotion sequence.`, + }, + "wait": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. How long the release need to be paused until being promoted to the next target.`, + }, + }, + }, + }, + }, + }, + }, + "selector": { + Type: schema.TypeList, + Required: true, + Description: `Required. Selected resources to which the automation will be applied.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "targets": { + Type: schema.TypeList, + Required: true, + Description: `Contains attributes about a target.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Description: `ID of the 'Target'. The value of this field could be one of the following: * The last segment of a target name. It only needs the ID to determine which target is being referred to * "*", all targets in a location.`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Target labels.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "service_account": { + Type: schema.TypeString, + Required: true, + Description: `Required. Email address of the user-managed IAM service account that creates Cloud Deploy release and rollout resources.`, + }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Optional. User annotations. 
These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash ('/'). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ('[a-z0-9A-Z]') with dashes ('-'), underscores ('_'), dots ('.'), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots('.'), not longer than 253 characters in total, followed by a slash ('/'). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field 'effective_annotations' for all of the annotations present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. Description of the 'Automation'. Max length is 255 characters.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 63 characters. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "suspended": { + Type: schema.TypeBool, + Optional: true, + Description: `Optional. When Suspended, automation is deactivated from execution.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Time at which the automation was created.`, + }, + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: `All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Optional. The weak etag of the 'Automation' resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Unique identifier of the 'Automation'.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
Time at which the automation was updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceClouddeployAutomationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandClouddeployAutomationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + suspendedProp, err := expandClouddeployAutomationSuspended(d.Get("suspended"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("suspended"); ok || !reflect.DeepEqual(v, suspendedProp) { + obj["suspended"] = suspendedProp + } + serviceAccountProp, err := expandClouddeployAutomationServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + selectorProp, err := expandClouddeployAutomationSelector(d.Get("selector"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("selector"); !tpgresource.IsEmptyValue(reflect.ValueOf(selectorProp)) && (ok || !reflect.DeepEqual(v, selectorProp)) { + obj["selector"] = selectorProp + } + rulesProp, err := expandClouddeployAutomationRules(d.Get("rules"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(rulesProp)) && (ok || !reflect.DeepEqual(v, rulesProp)) { + 
obj["rules"] = rulesProp + } + annotationsProp, err := expandClouddeployAutomationEffectiveAnnotations(d.Get("effective_annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + labelsProp, err := expandClouddeployAutomationEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ClouddeployBasePath}}projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations?automationId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Automation: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Automation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Automation: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ClouddeployOperationWaitTime( 
+ config, res, project, "Creating Automation", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Automation: %s", err) + } + + log.Printf("[DEBUG] Finished creating Automation %q: %#v", d.Id(), res) + + return resourceClouddeployAutomationRead(d, meta) +} + +func resourceClouddeployAutomationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ClouddeployBasePath}}projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Automation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ClouddeployAutomation %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + + if err := d.Set("uid", flattenClouddeployAutomationUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("description", flattenClouddeployAutomationDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("create_time", 
flattenClouddeployAutomationCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("update_time", flattenClouddeployAutomationUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("annotations", flattenClouddeployAutomationAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("labels", flattenClouddeployAutomationLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("etag", flattenClouddeployAutomationEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("suspended", flattenClouddeployAutomationSuspended(res["suspended"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("service_account", flattenClouddeployAutomationServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("selector", flattenClouddeployAutomationSelector(res["selector"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("rules", flattenClouddeployAutomationRules(res["rules"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("effective_annotations", flattenClouddeployAutomationEffectiveAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("terraform_labels", flattenClouddeployAutomationTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + if err := d.Set("effective_labels", 
flattenClouddeployAutomationEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Automation: %s", err) + } + + return nil +} + +func resourceClouddeployAutomationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Automation: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandClouddeployAutomationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + suspendedProp, err := expandClouddeployAutomationSuspended(d.Get("suspended"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("suspended"); ok || !reflect.DeepEqual(v, suspendedProp) { + obj["suspended"] = suspendedProp + } + serviceAccountProp, err := expandClouddeployAutomationServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + selectorProp, err := expandClouddeployAutomationSelector(d.Get("selector"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("selector"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectorProp)) { + obj["selector"] = selectorProp + } + rulesProp, err := expandClouddeployAutomationRules(d.Get("rules"), d, config) + if err != nil { + return err + } else if v, ok 
:= d.GetOkExists("rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rulesProp)) { + obj["rules"] = rulesProp + } + annotationsProp, err := expandClouddeployAutomationEffectiveAnnotations(d.Get("effective_annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + labelsProp, err := expandClouddeployAutomationEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ClouddeployBasePath}}projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Automation %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("suspended") { + updateMask = append(updateMask, "suspended") + } + + if d.HasChange("service_account") { + updateMask = append(updateMask, "serviceAccount") + } + + if d.HasChange("selector") { + updateMask = append(updateMask, "selector") + } + + if d.HasChange("rules") { + updateMask = append(updateMask, "rules") + } + + if d.HasChange("effective_annotations") { + updateMask = append(updateMask, "annotations") + } + + if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err 
== nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Automation %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Automation %q: %#v", d.Id(), res) + } + + err = ClouddeployOperationWaitTime( + config, res, project, "Updating Automation", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceClouddeployAutomationRead(d, meta) +} + +func resourceClouddeployAutomationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Automation: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ClouddeployBasePath}}projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Automation %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Automation") + } + + err = ClouddeployOperationWaitTime( + config, res, project, "Deleting Automation", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Automation %q: %#v", d.Id(), res) + return nil +} + +func resourceClouddeployAutomationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/deliveryPipelines/(?P[^/]+)/automations/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenClouddeployAutomationUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := 
d.GetOkExists("annotations"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenClouddeployAutomationLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenClouddeployAutomationEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationSuspended(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationSelector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["targets"] = + flattenClouddeployAutomationSelectorTargets(original["targets"], d, config) + return []interface{}{transformed} +} +func flattenClouddeployAutomationSelectorTargets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenClouddeployAutomationSelectorTargetsId(original["id"], d, config), + "labels": 
flattenClouddeployAutomationSelectorTargetsLabels(original["labels"], d, config), + }) + } + return transformed +} +func flattenClouddeployAutomationSelectorTargetsId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationSelectorTargetsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "promote_release_rule": flattenClouddeployAutomationRulesPromoteReleaseRule(original["promoteReleaseRule"], d, config), + "advance_rollout_rule": flattenClouddeployAutomationRulesAdvanceRolloutRule(original["advanceRolloutRule"], d, config), + }) + } + return transformed +} +func flattenClouddeployAutomationRulesPromoteReleaseRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenClouddeployAutomationRulesPromoteReleaseRuleId(original["id"], d, config) + transformed["wait"] = + flattenClouddeployAutomationRulesPromoteReleaseRuleWait(original["wait"], d, config) + transformed["destination_target_id"] = + flattenClouddeployAutomationRulesPromoteReleaseRuleDestinationTargetId(original["destinationTargetId"], d, config) + transformed["destination_phase"] = + flattenClouddeployAutomationRulesPromoteReleaseRuleDestinationPhase(original["destinationPhase"], d, config) + return 
[]interface{}{transformed} +} +func flattenClouddeployAutomationRulesPromoteReleaseRuleId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRulesPromoteReleaseRuleWait(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRulesPromoteReleaseRuleDestinationTargetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRulesPromoteReleaseRuleDestinationPhase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRulesAdvanceRolloutRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenClouddeployAutomationRulesAdvanceRolloutRuleId(original["id"], d, config) + transformed["wait"] = + flattenClouddeployAutomationRulesAdvanceRolloutRuleWait(original["wait"], d, config) + transformed["source_phases"] = + flattenClouddeployAutomationRulesAdvanceRolloutRuleSourcePhases(original["sourcePhases"], d, config) + return []interface{}{transformed} +} +func flattenClouddeployAutomationRulesAdvanceRolloutRuleId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRulesAdvanceRolloutRuleWait(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationRulesAdvanceRolloutRuleSourcePhases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationEffectiveAnnotations(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenClouddeployAutomationTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenClouddeployAutomationEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandClouddeployAutomationDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationSuspended(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationSelector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargets, err := expandClouddeployAutomationSelectorTargets(original["targets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targets"] = transformedTargets + } + + return transformed, nil +} + +func expandClouddeployAutomationSelectorTargets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for 
_, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandClouddeployAutomationSelectorTargetsId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedLabels, err := expandClouddeployAutomationSelectorTargetsLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + req = append(req, transformed) + } + return req, nil +} + +func expandClouddeployAutomationSelectorTargetsId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationSelectorTargetsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandClouddeployAutomationRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPromoteReleaseRule, err := expandClouddeployAutomationRulesPromoteReleaseRule(original["promote_release_rule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPromoteReleaseRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["promoteReleaseRule"] = 
transformedPromoteReleaseRule + } + + transformedAdvanceRolloutRule, err := expandClouddeployAutomationRulesAdvanceRolloutRule(original["advance_rollout_rule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdvanceRolloutRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["advanceRolloutRule"] = transformedAdvanceRolloutRule + } + + req = append(req, transformed) + } + return req, nil +} + +func expandClouddeployAutomationRulesPromoteReleaseRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandClouddeployAutomationRulesPromoteReleaseRuleId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedWait, err := expandClouddeployAutomationRulesPromoteReleaseRuleWait(original["wait"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWait); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wait"] = transformedWait + } + + transformedDestinationTargetId, err := expandClouddeployAutomationRulesPromoteReleaseRuleDestinationTargetId(original["destination_target_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDestinationTargetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["destinationTargetId"] = transformedDestinationTargetId + } + + transformedDestinationPhase, err := expandClouddeployAutomationRulesPromoteReleaseRuleDestinationPhase(original["destination_phase"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedDestinationPhase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["destinationPhase"] = transformedDestinationPhase + } + + return transformed, nil +} + +func expandClouddeployAutomationRulesPromoteReleaseRuleId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationRulesPromoteReleaseRuleWait(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationRulesPromoteReleaseRuleDestinationTargetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationRulesPromoteReleaseRuleDestinationPhase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationRulesAdvanceRolloutRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandClouddeployAutomationRulesAdvanceRolloutRuleId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedWait, err := expandClouddeployAutomationRulesAdvanceRolloutRuleWait(original["wait"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWait); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wait"] = transformedWait + } + + transformedSourcePhases, err := 
expandClouddeployAutomationRulesAdvanceRolloutRuleSourcePhases(original["source_phases"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourcePhases); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourcePhases"] = transformedSourcePhases + } + + return transformed, nil +} + +func expandClouddeployAutomationRulesAdvanceRolloutRuleId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationRulesAdvanceRolloutRuleWait(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationRulesAdvanceRolloutRuleSourcePhases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandClouddeployAutomationEffectiveAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandClouddeployAutomationEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/google/services/clouddeploy/resource_clouddeploy_automation_generated_test.go b/google/services/clouddeploy/resource_clouddeploy_automation_generated_test.go new file mode 100644 index 00000000000..b88175d4cb0 --- /dev/null +++ b/google/services/clouddeploy/resource_clouddeploy_automation_generated_test.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package clouddeploy_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccClouddeployAutomation_clouddeployAutomationBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "service_account": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployAutomationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployAutomation_clouddeployAutomationBasicExample(context), + }, + { + ResourceName: "google_clouddeploy_automation.b-automation", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "delivery_pipeline", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccClouddeployAutomation_clouddeployAutomationBasicExample(context 
map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_automation" "b-automation" { + name = "tf-test-cd-automation%{random_suffix}" + project = google_clouddeploy_delivery_pipeline.pipeline.project + location = google_clouddeploy_delivery_pipeline.pipeline.location + delivery_pipeline = google_clouddeploy_delivery_pipeline.pipeline.name + service_account = "%{service_account}" + selector { + targets { + id = "*" + } + } + suspended = false + rules { + promote_release_rule { + id = "promote-release" + } + } +} + +resource "google_clouddeploy_delivery_pipeline" "pipeline" { + name = "tf-test-cd-pipeline%{random_suffix}" + location = "us-central1" + serial_pipeline { + stages { + target_id = "test" + profiles = [] + } + } + } +`, context) +} + +func TestAccClouddeployAutomation_clouddeployAutomationFullExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "service_account": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployAutomationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployAutomation_clouddeployAutomationFullExample(context), + }, + { + ResourceName: "google_clouddeploy_automation.f-automation", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "delivery_pipeline", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccClouddeployAutomation_clouddeployAutomationFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_automation" "f-automation" { + name = "tf-test-cd-automation%{random_suffix}" + location = "us-central1" + delivery_pipeline = google_clouddeploy_delivery_pipeline.pipeline.name + 
service_account = "%{service_account}" + annotations = { + my_first_annotation = "example-annotation-1" + my_second_annotation = "example-annotation-2" + } + labels = { + my_first_label = "example-label-1" + my_second_label = "example-label-2" + } + description = "automation resource" + selector { + targets { + id = "test" + labels = { + foo = "bar" + } + } + } + suspended = true + rules { + promote_release_rule{ + id = "promote-release" + wait = "200s" + destination_target_id = "@next" + destination_phase = "stable" + } + } + rules { + advance_rollout_rule { + id = "advance-rollout" + source_phases = ["deploy"] + wait = "200s" + } + } +} + +resource "google_clouddeploy_delivery_pipeline" "pipeline" { + name = "tf-test-cd-pipeline%{random_suffix}" + location = "us-central1" + serial_pipeline { + stages { + target_id = "test" + profiles = ["test-profile"] + } + } +} +`, context) +} + +func testAccCheckClouddeployAutomationDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_clouddeploy_automation" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{ClouddeployBasePath}}projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ClouddeployAutomation still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/services/clouddeploy/resource_clouddeploy_automation_sweeper.go 
b/google/services/clouddeploy/resource_clouddeploy_automation_sweeper.go new file mode 100644 index 00000000000..4fd400d26f7 --- /dev/null +++ b/google/services/clouddeploy/resource_clouddeploy_automation_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ClouddeployAutomation", testSweepClouddeployAutomation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepClouddeployAutomation(region string) error { + resourceName := "ClouddeployAutomation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list 
template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://clouddeploy.googleapis.com/v1/projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["automations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://clouddeploy.googleapis.com/v1/projects/{{project}}/locations/{{location}}/deliveryPipelines/{{delivery_pipeline}}/automations/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/clouddeploy/resource_clouddeploy_automation_test.go b/google/services/clouddeploy/resource_clouddeploy_automation_test.go index 4ee3d902428..8402bbb227a 100644 --- a/google/services/clouddeploy/resource_clouddeploy_automation_test.go +++ b/google/services/clouddeploy/resource_clouddeploy_automation_test.go @@ -1,3 +1,138 @@ // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0 package clouddeploy_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccClouddeployAutomation_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "service_account": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployAutomationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployAutomation_basic(context), + }, + { + ResourceName: "google_clouddeploy_automation.automation", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "delivery_pipeline", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccClouddeployAutomation_update(context), + }, + { + ResourceName: "google_clouddeploy_automation.automation", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "delivery_pipeline", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccClouddeployAutomation_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_automation" "automation" { + name = "tf-test-cd-automation%{random_suffix}" + location = "us-central1" + delivery_pipeline = google_clouddeploy_delivery_pipeline.pipeline.name + service_account = "%{service_account}" + selector { + targets { + id = "*" + labels = {} + } + } + rules { + advance_rollout_rule { + id = "advance-rollout" + source_phases = ["deploy"] + wait = "200s" + } + } +} + +resource "google_clouddeploy_delivery_pipeline" "pipeline" { + name = 
"tf-test-cd-pipeline%{random_suffix}" + location = "us-central1" + serial_pipeline { + stages { + target_id = "test" + profiles = ["test-profile"] + } + } + } +`, context) +} + +func testAccClouddeployAutomation_update(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_clouddeploy_automation" "automation" { + name = "tf-test-cd-automation%{random_suffix}" + location = "us-central1" + delivery_pipeline = google_clouddeploy_delivery_pipeline.pipeline.name + service_account = "%{service_account}" + annotations = { + first_annotation = "example-annotation-1" + second_annotation = "example-annotation-2" + } + labels = { + first_label = "example-label-1" + second_label = "example-label-2" + } + description = "automation resource" + selector { + targets { + id = "dev" + labels = { + foo = "bar2" + } + } + } + suspended = true + rules { + advance_rollout_rule { + id = "advance-rollout" + source_phases = ["verify"] + wait = "100s" + } + } + rules { + promote_release_rule{ + id = "promote-release" + wait = "200s" + destination_target_id = "@next" + destination_phase = "stable" + } + } +} + +resource "google_clouddeploy_delivery_pipeline" "pipeline" { + name = "tf-test-cd-pipeline%{random_suffix}" + location = "us-central1" + serial_pipeline { + stages { + target_id = "test" + profiles = ["test-profile"] + } + } + } +`, context) +} diff --git a/google/services/cloudfunctions/resource_cloudfunctions_function.go b/google/services/cloudfunctions/resource_cloudfunctions_function.go index 0116097c74d..fbede76589a 100644 --- a/google/services/cloudfunctions/resource_cloudfunctions_function.go +++ b/google/services/cloudfunctions/resource_cloudfunctions_function.go @@ -203,7 +203,7 @@ func ResourceCloudFunctionsFunction() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - Description: `Docker Registry to use for storing the function's Docker images. 
Allowed values are CONTAINER_REGISTRY (default) and ARTIFACT_REGISTRY.`, + Description: `Docker Registry to use for storing the function's Docker images. Allowed values are ARTIFACT_REGISTRY (default) and CONTAINER_REGISTRY.`, }, "docker_repository": { diff --git a/google/services/cloudfunctions/resource_cloudfunctions_function_test.go b/google/services/cloudfunctions/resource_cloudfunctions_function_test.go index 6a7363ba8c4..bbda1fd6e47 100644 --- a/google/services/cloudfunctions/resource_cloudfunctions_function_test.go +++ b/google/services/cloudfunctions/resource_cloudfunctions_function_test.go @@ -53,7 +53,7 @@ func TestAccCloudFunctionsFunction_basic(t *testing.T) { resource.TestCheckResourceAttr(funcResourceName, "description", "test function"), resource.TestCheckResourceAttr(funcResourceName, - "docker_registry", "CONTAINER_REGISTRY"), + "docker_registry", "ARTIFACT_REGISTRY"), resource.TestCheckResourceAttr(funcResourceName, "available_memory_mb", "128"), resource.TestCheckResourceAttr(funcResourceName, @@ -599,7 +599,7 @@ resource "google_cloudfunctions_function" "function" { name = "%s" runtime = "nodejs10" description = "test function" - docker_registry = "CONTAINER_REGISTRY" + docker_registry = "ARTIFACT_REGISTRY" available_memory_mb = 128 source_archive_bucket = google_storage_bucket.bucket.name source_archive_object = google_storage_bucket_object.archive.name @@ -702,7 +702,7 @@ resource "google_cloudfunctions_function" "function" { name = "%[3]s" runtime = "nodejs10" description = "test function" - docker_registry = "CONTAINER_REGISTRY" + docker_registry = "ARTIFACT_REGISTRY" available_memory_mb = 128 source_archive_bucket = google_storage_bucket.bucket.name source_archive_object = google_storage_bucket_object.archive.name diff --git a/google/services/cloudrunv2/resource_cloud_run_v2_service.go b/google/services/cloudrunv2/resource_cloud_run_v2_service.go index e87adc747bd..b359768cf21 100644 --- 
a/google/services/cloudrunv2/resource_cloud_run_v2_service.go +++ b/google/services/cloudrunv2/resource_cloud_run_v2_service.go @@ -326,9 +326,10 @@ If omitted, a port number will be chosen and passed to the container through the Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cpu_idle": { - Type: schema.TypeBool, - Optional: true, - Description: `Determines whether CPU should be throttled or not outside of requests.`, + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether CPU is only allocated during requests. True by default if the parent 'resources' field is not set. However, if +'resources' is set, this field must be explicitly set to true to preserve the default behavior.`, }, "limits": { Type: schema.TypeMap, diff --git a/google/services/composer/resource_composer_environment.go b/google/services/composer/resource_composer_environment.go index c753ae7553d..0a5be2b96c1 100644 --- a/google/services/composer/resource_composer_environment.go +++ b/google/services/composer/resource_composer_environment.go @@ -3,6 +3,7 @@ package composer import ( + "context" "fmt" "log" "regexp" @@ -232,7 +233,7 @@ func ResourceComposerEnvironment() *schema.Resource { Optional: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `The Compute Engine subnetwork to be used for machine communications, , specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region.`, + Description: `The Compute Engine subnetwork to be used for machine communications, specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. 
If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region.`, }, "disk_size_gb": { Type: schema.TypeInt, @@ -1394,7 +1395,10 @@ func flattenComposerEnvironmentConfig(envCfg *composer.EnvironmentConfig) interf transformed["airflow_uri"] = envCfg.AirflowUri transformed["node_config"] = flattenComposerEnvironmentConfigNodeConfig(envCfg.NodeConfig) transformed["software_config"] = flattenComposerEnvironmentConfigSoftwareConfig(envCfg.SoftwareConfig) - transformed["private_environment_config"] = flattenComposerEnvironmentConfigPrivateEnvironmentConfig(envCfg.PrivateEnvironmentConfig) + imageVersion := envCfg.SoftwareConfig.ImageVersion + if !isComposer3(imageVersion) { + transformed["private_environment_config"] = flattenComposerEnvironmentConfigPrivateEnvironmentConfig(envCfg.PrivateEnvironmentConfig) + } transformed["web_server_network_access_control"] = flattenComposerEnvironmentConfigWebServerNetworkAccessControl(envCfg.WebServerNetworkAccessControl) transformed["database_config"] = flattenComposerEnvironmentConfigDatabaseConfig(envCfg.DatabaseConfig) transformed["web_server_config"] = flattenComposerEnvironmentConfigWebServerConfig(envCfg.WebServerConfig) @@ -2169,6 +2173,7 @@ func expandComposerEnvironmentConfigNodeConfig(v interface{}, d *schema.Resource } transformed.Subnetwork = transformedSubnetwork } + transformedIPAllocationPolicy, err := expandComposerEnvironmentIPAllocationPolicy(original["ip_allocation_policy"], d, config) if err != nil { return nil, err @@ -2608,7 +2613,59 @@ func versionsEqual(old, new string) (bool, error) { return o.Equal(n), nil } -func isComposer3(d *schema.ResourceData, config *transport_tpg.Config) bool { - image_version := d.Get("config.0.software_config.0.image_version").(string) - return strings.Contains(image_version, "composer-3") +func isComposer3(imageVersion string) bool { + return strings.Contains(imageVersion, "composer-3") +} + +func 
forceNewCustomDiff(key string) customdiff.ResourceConditionFunc { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) bool { + old, new := d.GetChange(key) + imageVersion := d.Get("config.0.software_config.0.image_version").(string) + if isComposer3(imageVersion) || tpgresource.CompareSelfLinkRelativePaths("", old.(string), new.(string), nil) { + return false + } + return true + } +} + +func imageVersionChangeValidationFunc(ctx context.Context, old, new, meta any) error { + if old.(string) != "" && !isComposer3(old.(string)) && isComposer3(new.(string)) { + return fmt.Errorf("upgrade to composer 3 is not yet supported") + } + return nil +} + +func validateComposer3FieldUsage(d *schema.ResourceDiff, key string, requireComposer3 bool) error { + _, ok := d.GetOk(key) + imageVersion := d.Get("config.0.software_config.0.image_version").(string) + if ok && (isComposer3(imageVersion) != requireComposer3) { + if requireComposer3 { + return fmt.Errorf("error in configuration, %s should only be used in Composer 3", key) + } else { + return fmt.Errorf("error in configuration, %s should not be used in Composer 3", key) + } + } + return nil +} + +func versionValidationCustomizeDiffFunc(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + composer3FieldUsagePolicy := map[string]bool{ + "config.0.node_config.0.max_pods_per_node": false, // not allowed in composer 3 + "config.0.node_config.0.enable_ip_masq_agent": false, + "config.0.node_config.0.config.0.node_config.0.ip_allocation_policy": false, + "config.0.private_environment_config": false, + "config.0.master_authorized_networks_config": false, + "config.0.node_config.0.composer_network_attachment": true, // allowed only in composer 3 + "config.0.node_config.0.composer_internal_ipv4_cidr_block": true, + "config.0.software_config.0.web_server_plugins_mode": true, + "config.0.enable_private_environment": true, + "config.0.enable_private_builds_only": true, + 
"config.0.workloads_config.0.dag_processor": true, + } + for key, allowed := range composer3FieldUsagePolicy { + if err := validateComposer3FieldUsage(d, key, allowed); err != nil { + return err + } + } + return nil } diff --git a/google/services/composer/resource_composer_environment_test.go b/google/services/composer/resource_composer_environment_test.go index 06e4c3aa38e..2ecf522aa5f 100644 --- a/google/services/composer/resource_composer_environment_test.go +++ b/google/services/composer/resource_composer_environment_test.go @@ -22,6 +22,7 @@ import ( const testComposerEnvironmentPrefix = "tf-test-composer-env" const testComposerNetworkPrefix = "tf-test-composer-net" const testComposerBucketPrefix = "tf-test-composer-bucket" +const testComposerNetworkAttachmentPrefix = "tf-test-composer-nta" func allComposerServiceAgents() []string { return []string{ diff --git a/google/services/compute/resource_compute_instance_group_manager.go b/google/services/compute/resource_compute_instance_group_manager.go index dd8800f1e31..a394898a5ce 100644 --- a/google/services/compute/resource_compute_instance_group_manager.go +++ b/google/services/compute/resource_compute_instance_group_manager.go @@ -294,6 +294,13 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Description: `The instance lifecycle policy for this managed instance group.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "default_action_on_failure": { + Type: schema.TypeString, + Default: "REPAIR", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"REPAIR", "DO_NOTHING"}, true), + Description: `Default behavior for all instance or health check failures.`, + }, "force_update_on_repair": { Type: schema.TypeString, Default: "NO", @@ -305,6 +312,30 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { }, }, + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance 
template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, "wait_for_instances": { Type: schema.TypeBool, Optional: true, @@ -316,8 +347,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Optional: true, Default: "STABLE", ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, }, "stateful_internal_ip": { Type: schema.TypeList, @@ -413,6 +443,20 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + }, + }, + }, "stateful": { Type: schema.TypeList, Computed: true, @@ -532,6 +576,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), StatefulPolicy: expandStatefulPolicy(d), // Force send TargetSize to allow a value of 0. 
@@ -755,6 +800,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err = d.Set("instance_lifecycle_policy", flattenInstanceLifecyclePolicy(manager.InstanceLifecyclePolicy)); err != nil { return fmt.Errorf("Error setting instance lifecycle policy in state: %s", err.Error()) } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", err.Error()) + } + } if err = d.Set("status", flattenStatus(manager.Status)); err != nil { return fmt.Errorf("Error setting status in state: %s", err.Error()) } @@ -825,6 +875,16 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte change = true } + if d.HasChange("all_instances_config") { + oldAic, newAic := d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + if d.HasChange("stateful_internal_ip") || d.HasChange("stateful_external_ip") || d.HasChange("stateful_disk") { updatedManager.StatefulPolicy = expandStatefulPolicy(d) change = true @@ -966,7 +1026,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte func computeIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" conf := resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getManager, 
waitForUpdates, d, meta), Timeout: d.Timeout(schema.TimeoutCreate), @@ -1124,6 +1184,7 @@ func expandInstanceLifecyclePolicy(configured []interface{}) *compute.InstanceGr for _, raw := range configured { data := raw.(map[string]interface{}) instanceLifecyclePolicy.ForceUpdateOnRepair = data["force_update_on_repair"].(string) + instanceLifecyclePolicy.DefaultActionOnFailure = data["default_action_on_failure"].(string) } return instanceLifecyclePolicy } @@ -1305,11 +1366,68 @@ func flattenInstanceLifecyclePolicy(instanceLifecyclePolicy *compute.InstanceGro if instanceLifecyclePolicy != nil { ilp := map[string]interface{}{} ilp["force_update_on_repair"] = instanceLifecyclePolicy.ForceUpdateOnRepair + ilp["default_action_on_failure"] = instanceLifecyclePolicy.DefaultActionOnFailure results = append(results, ilp) } return results } +func expandAllInstancesConfig(old []interface{}, new []interface{}) *compute.InstanceGroupManagerAllInstancesConfig { + var properties *compute.InstancePropertiesPatch + for _, raw := range new { + properties = &compute.InstancePropertiesPatch{} + if raw != nil { + data := raw.(map[string]interface{}) + properties.Metadata = tpgresource.ConvertStringMap(data["metadata"].(map[string]interface{})) + if len(properties.Metadata) == 0 { + properties.NullFields = append(properties.NullFields, "Metadata") + } + properties.Labels = tpgresource.ConvertStringMap(data["labels"].(map[string]interface{})) + if len(properties.Labels) == 0 { + properties.NullFields = append(properties.NullFields, "Labels") + } + } + } + + if properties != nil { + for _, raw := range old { + if raw != nil { + data := raw.(map[string]interface{}) + for k := range data["metadata"].(map[string]interface{}) { + if _, exist := properties.Metadata[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) + } + } + for k := range data["labels"].(map[string]interface{}) { + if _, exist := properties.Labels[k]; !exist { + 
properties.NullFields = append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) + } + } + } + } + } + if properties != nil { + allInstancesConfig := &compute.InstanceGroupManagerAllInstancesConfig{} + allInstancesConfig.Properties = properties + return allInstancesConfig + } else { + return nil + } +} + +func flattenAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + props := map[string]interface{}{} + if len(allInstancesConfig.Properties.Metadata) > 0 { + props["metadata"] = allInstancesConfig.Properties.Metadata + } + if len(allInstancesConfig.Properties.Labels) > 0 { + props["labels"] = allInstancesConfig.Properties.Labels + } + results = append(results, props) + return results +} + func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]interface{} { results := []map[string]interface{}{} data := map[string]interface{}{ @@ -1317,6 +1435,9 @@ func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]inte "stateful": flattenStatusStateful(status.Stateful), "version_target": flattenStatusVersionTarget(status.VersionTarget), } + if status.AllInstancesConfig != nil { + data["all_instances_config"] = flattenStatusAllInstancesConfig(status.AllInstancesConfig) + } results = append(results, data) return results } @@ -1349,6 +1470,15 @@ func flattenStatusVersionTarget(versionTarget *compute.InstanceGroupManagerStatu return results } +func flattenStatusAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerStatusAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "effective": allInstancesConfig.Effective, + } + results = append(results, data) + return results +} + func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { if err := d.Set("wait_for_instances", false); err != nil { return 
nil, fmt.Errorf("Error setting wait_for_instances: %s", err) diff --git a/google/services/compute/resource_compute_instance_group_manager_test.go b/google/services/compute/resource_compute_instance_group_manager_test.go index d5c92163076..00b483ec606 100644 --- a/google/services/compute/resource_compute_instance_group_manager_test.go +++ b/google/services/compute/resource_compute_instance_group_manager_test.go @@ -117,6 +117,9 @@ func TestAccInstanceGroupManager_update(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccInstanceGroupManager_update(template1, target1, description, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "DO_NOTHING"), + ), }, { ResourceName: "google_compute_instance_group_manager.igm-update", @@ -126,6 +129,9 @@ func TestAccInstanceGroupManager_update(t *testing.T) { }, { Config: testAccInstanceGroupManager_update2(template1, target1, target2, template2, description, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), }, { ResourceName: "google_compute_instance_group_manager.igm-update", @@ -135,6 +141,9 @@ func TestAccInstanceGroupManager_update(t *testing.T) { }, { Config: testAccInstanceGroupManager_update3(template1, target1, target2, template2, description2, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), }, { ResourceName: "google_compute_instance_group_manager.igm-update", @@ -661,9 +670,18 @@ resource "google_compute_instance_group_manager" "igm-update" { name = "customhttp" port = 8080 } + all_instances_config { + metadata = { + foo = "bar" + } + labels = { + doo = "dad" + } + } 
instance_lifecycle_policy { force_update_on_repair = "YES" + default_action_on_failure = "DO_NOTHING" } } `, template, target, description, igm) @@ -757,9 +775,18 @@ resource "google_compute_instance_group_manager" "igm-update" { port = 8443 } + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } instance_lifecycle_policy { force_update_on_repair = "NO" + default_action_on_failure = "REPAIR" } } `, template1, target1, target2, template2, description, igm) @@ -1758,8 +1785,17 @@ resource "google_compute_instance_group_manager" "igm-basic" { max_surge_fixed = 0 max_unavailable_percent = 50 } + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } instance_lifecycle_policy { force_update_on_repair = "YES" + default_action_on_failure = "REPAIR" } wait_for_instances = true wait_for_instances_status = "UPDATED" diff --git a/google/services/compute/resource_compute_region_autoscaler.go b/google/services/compute/resource_compute_region_autoscaler.go index 7e9ee486f9d..ccd8566d750 100644 --- a/google/services/compute/resource_compute_region_autoscaler.go +++ b/google/services/compute/resource_compute_region_autoscaler.go @@ -167,6 +167,62 @@ be a positive float value. If not defined, the default is 0.8.`, The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `A filter string to be used as the filter string for +a Stackdriver Monitoring TimeSeries.list API call. +This filter is used to select a specific TimeSeries for +the purpose of autoscaling and to determine whether the metric +is exporting per-instance or per-group data. + +You can only use the AND operator for joining selectors. +You can only use direct equality comparison operator (=) without +any functions for each selector. +You can specify the metric in both the filter string and in the +metric field. 
However, if specified in both places, the metric must +be identical. + +The monitored resource type determines what kind of values are +expected for the metric. If it is a gce_instance, the autoscaler +expects the metric to include a separate TimeSeries for each +instance in a group. In such a case, you cannot filter on resource +labels. + +If the resource type is any other value, the autoscaler expects +this metric to contain values that apply to the entire autoscaled +instance group and resource label filtering can be performed to +point autoscaler at the correct TimeSeries to scale upon. +This is called a per-group metric for the purpose of autoscaling. + +If not specified, the type defaults to gce_instance. + +You should provide a filter that is selective enough to pick just +one TimeSeries for the autoscaled group or for each of the instances +(if you are using gce_instance resource type). If multiple +TimeSeries are returned upon the query execution, the autoscaler +will sum their respective values to obtain its scaling value.`, + }, + "single_instance_assignment": { + Type: schema.TypeFloat, + Optional: true, + Description: `If scaling is based on a per-group metric value that represents the +total amount of work to be done or resource usage, set this value to +an amount assigned for a single instance of the scaled group. +The autoscaler will keep the number of instances proportional to the +value of this metric, the metric itself should not change value due +to group resizing. + +For example, a good metric to use with the target is +'pubsub.googleapis.com/subscription/num_undelivered_messages' +or a custom metric exporting the total number of requests coming to +your instances. 
+ +A bad example would be a metric exporting an average or median +latency, since this value can't include a chunk assignable to a +single instance, it could be better used with utilization_target +instead.`, }, "target": { Type: schema.TypeFloat, @@ -878,9 +934,11 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *sch continue } transformed = append(transformed, map[string]interface{}{ - "name": flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(original["metric"], d, config), - "target": flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["utilizationTarget"], d, config), - "type": flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(original["utilizationTargetType"], d, config), + "name": flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(original["metric"], d, config), + "single_instance_assignment": flattenComputeRegionAutoscalerAutoscalingPolicyMetricSingleInstanceAssignment(original["singleInstanceAssignment"], d, config), + "target": flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["utilizationTarget"], d, config), + "type": flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(original["utilizationTargetType"], d, config), + "filter": flattenComputeRegionAutoscalerAutoscalingPolicyMetricFilter(original["filter"], d, config), }) } return transformed @@ -889,6 +947,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d return v } +func flattenComputeRegionAutoscalerAutoscalingPolicyMetricSingleInstanceAssignment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -897,6 +959,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d return v } +func 
flattenComputeRegionAutoscalerAutoscalingPolicyMetricFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -1202,6 +1268,13 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d tpgre transformed["metric"] = transformedName } + transformedSingleInstanceAssignment, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricSingleInstanceAssignment(original["single_instance_assignment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSingleInstanceAssignment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["singleInstanceAssignment"] = transformedSingleInstanceAssignment + } + transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config) if err != nil { return nil, err @@ -1216,6 +1289,13 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d tpgre transformed["utilizationTargetType"] = transformedType } + transformedFilter, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + req = append(req, transformed) } return req, nil @@ -1225,6 +1305,10 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d t return v, nil } +func expandComputeRegionAutoscalerAutoscalingPolicyMetricSingleInstanceAssignment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1233,6 +1317,10 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d t return v, nil } +func expandComputeRegionAutoscalerAutoscalingPolicyMetricFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/google/services/compute/resource_compute_region_instance_group_manager.go b/google/services/compute/resource_compute_region_instance_group_manager.go index 96c1e95a0d3..5ee1d82c8f3 100644 --- a/google/services/compute/resource_compute_region_instance_group_manager.go +++ b/google/services/compute/resource_compute_region_instance_group_manager.go @@ -205,8 +205,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Optional: true, Default: "STABLE", ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, }, "auto_healing_policies": { @@ -261,6 +260,13 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Description: `The instance lifecycle policy for this managed instance group.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "default_action_on_failure": { + Type: schema.TypeString, + Default: "REPAIR", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"REPAIR", "DO_NOTHING"}, true), + Description: `Default behavior for all instance or health check failures.`, + }, "force_update_on_repair": { Type: schema.TypeString, Default: "NO", @@ -350,6 +356,30 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, "stateful_internal_ip": { Type: schema.TypeList, Optional: true, @@ -440,6 +470,20 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + }, + }, + }, "stateful": { Type: schema.TypeList, Computed: true, @@ -505,6 +549,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), DistributionPolicy: expandDistributionPolicy(d), StatefulPolicy: expandStatefulPolicy(d), // Force send TargetSize to allow size of 0. 
@@ -542,7 +587,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met func computeRIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" conf := resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getRegionalManager, waitForUpdates, d, meta), Timeout: d.Timeout(schema.TimeoutCreate), @@ -610,6 +655,9 @@ func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, if !m.Status.VersionTarget.IsReached { return false, "reaching version target", nil } + if !m.Status.AllInstancesConfig.Effective { + return false, "updating all instances config", nil + } } return true, "created", nil } else { @@ -694,6 +742,11 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err = d.Set("instance_lifecycle_policy", flattenInstanceLifecyclePolicy(manager.InstanceLifecyclePolicy)); err != nil { return fmt.Errorf("Error setting instance lifecycle policy in state: %s", err.Error()) } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", err.Error()) + } + } if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { return fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) } @@ -776,6 +829,16 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met change = true } + if d.HasChange("all_instances_config") { + oldAic, newAic := d.GetChange("all_instances_config") + if newAic == nil || 
len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + if d.HasChange("list_managed_instances_results") { updatedManager.ListManagedInstancesResults = d.Get("list_managed_instances_results").(string) change = true diff --git a/google/services/compute/resource_compute_region_instance_group_manager_test.go b/google/services/compute/resource_compute_region_instance_group_manager_test.go index 8514dd565a1..1cdbe02a412 100644 --- a/google/services/compute/resource_compute_region_instance_group_manager_test.go +++ b/google/services/compute/resource_compute_region_instance_group_manager_test.go @@ -84,6 +84,9 @@ func TestAccRegionInstanceGroupManager_update(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRegionInstanceGroupManager_update(template1, target1, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_region_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "DO_NOTHING"), + ), }, { ResourceName: "google_compute_region_instance_group_manager.igm-update", @@ -93,6 +96,9 @@ func TestAccRegionInstanceGroupManager_update(t *testing.T) { }, { Config: testAccRegionInstanceGroupManager_update2(template1, target1, target2, template2, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_region_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), }, { ResourceName: "google_compute_region_instance_group_manager.igm-update", @@ -102,6 +108,9 @@ func TestAccRegionInstanceGroupManager_update(t *testing.T) { }, { Config: testAccRegionInstanceGroupManager_update3(template1, target1, target2, template2, igm), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("google_compute_region_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), }, { ResourceName: "google_compute_region_instance_group_manager.igm-update", @@ -563,9 +572,18 @@ resource "google_compute_region_instance_group_manager" "igm-update" { port = 8080 } + all_instances_config { + metadata = { + foo = "bar" + } + labels = { + doo = "dad" + } + } instance_lifecycle_policy { force_update_on_repair = "YES" + default_action_on_failure = "DO_NOTHING" } } `, template, target, igm) @@ -659,9 +677,18 @@ resource "google_compute_region_instance_group_manager" "igm-update" { port = 8443 } + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } instance_lifecycle_policy { force_update_on_repair = "NO" + default_action_on_failure = "REPAIR" } } `, template1, target1, target2, template2, igm) diff --git a/google/services/compute/resource_compute_region_network_endpoint_group.go b/google/services/compute/resource_compute_region_network_endpoint_group.go index 522b87f917a..1133994d713 100644 --- a/google/services/compute/resource_compute_region_network_endpoint_group.go +++ b/google/services/compute/resource_compute_region_network_endpoint_group.go @@ -307,7 +307,7 @@ func resourceComputeRegionNetworkEndpointGroupCreate(d *schema.ResourceData, met appEngineProp, err := expandComputeRegionNetworkEndpointGroupAppEngine(d.Get("app_engine"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("app_engine"); !tpgresource.IsEmptyValue(reflect.ValueOf(appEngineProp)) && (ok || !reflect.DeepEqual(v, appEngineProp)) { + } else if v, ok := d.GetOkExists("app_engine"); ok || !reflect.DeepEqual(v, appEngineProp) { obj["appEngine"] = appEngineProp } cloudFunctionProp, err := expandComputeRegionNetworkEndpointGroupCloudFunction(d.Get("cloud_function"), d, config) diff --git 
a/google/services/compute/resource_compute_region_network_endpoint_group_generated_test.go b/google/services/compute/resource_compute_region_network_endpoint_group_generated_test.go index 148142d51cc..5afa35ca6d1 100644 --- a/google/services/compute/resource_compute_region_network_endpoint_group_generated_test.go +++ b/google/services/compute/resource_compute_region_network_endpoint_group_generated_test.go @@ -251,6 +251,44 @@ resource "google_storage_bucket_object" "appengine_neg" { `, context) } +func TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupAppengineEmptyExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionNetworkEndpointGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupAppengineEmptyExample(context), + }, + { + ResourceName: "google_compute_region_network_endpoint_group.appengine_neg", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "region"}, + }, + }, + }) +} + +func testAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupAppengineEmptyExample(context map[string]interface{}) string { + return acctest.Nprintf(` +// App Engine Example +resource "google_compute_region_network_endpoint_group" "appengine_neg" { + name = "tf-test-appengine-neg%{random_suffix}" + network_endpoint_type = "SERVERLESS" + region = "us-central1" + app_engine { + } +} +`, context) +} + func TestAccComputeRegionNetworkEndpointGroup_regionNetworkEndpointGroupPscExample(t *testing.T) { t.Parallel() diff --git a/google/services/compute/resource_compute_region_target_https_proxy.go 
b/google/services/compute/resource_compute_region_target_https_proxy.go index 35595b984b7..8cc08b7b223 100644 --- a/google/services/compute/resource_compute_region_target_https_proxy.go +++ b/google/services/compute/resource_compute_region_target_https_proxy.go @@ -21,6 +21,8 @@ import ( "fmt" "log" "reflect" + "regexp" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -64,17 +66,6 @@ first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, }, - "ssl_certificates": { - Type: schema.TypeList, - Required: true, - Description: `A list of RegionSslCertificate resources that are used to authenticate -connections between users and the load balancer. Currently, exactly -one SSL certificate must be specified.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - }, - }, "url_map": { Type: schema.TypeString, Required: true, @@ -82,6 +73,19 @@ one SSL certificate must be specified.`, Description: `A reference to the RegionUrlMap resource that defines the mapping from URL to the RegionBackendService.`, }, + "certificate_manager_certificates": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. +Currently, you may specify up to 15 certificates. Certificate manager certificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. +sslCertificates and certificateManagerCertificates fields can not be defined together. 
+Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}' or just the self_link 'projects/{project}/locations/{location}/certificates/{resourceName}'`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{"ssl_certificates"}, + }, "description": { Type: schema.TypeString, Optional: true, @@ -97,6 +101,18 @@ to the RegionBackendService.`, Description: `The Region in which the created target https proxy should reside. If it is not provided, the provider region is used.`, }, + "ssl_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. +At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. +sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + ConflictsWith: []string{"certificate_manager_certificates"}, + }, "ssl_policy": { Type: schema.TypeString, Optional: true, @@ -151,6 +167,12 @@ func resourceComputeRegionTargetHttpsProxyCreate(d *schema.ResourceData, meta in } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } + certificateManagerCertificatesProp, err := expandComputeRegionTargetHttpsProxyCertificateManagerCertificates(d.Get("certificate_manager_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_manager_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateManagerCertificatesProp)) && (ok || !reflect.DeepEqual(v, certificateManagerCertificatesProp)) { + obj["certificateManagerCertificates"] = certificateManagerCertificatesProp + } sslCertificatesProp, 
err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) if err != nil { return err @@ -176,6 +198,11 @@ func resourceComputeRegionTargetHttpsProxyCreate(d *schema.ResourceData, meta in obj["region"] = regionProp } + obj, err = resourceComputeRegionTargetHttpsProxyEncoder(d, meta, obj) + if err != nil { + return err + } + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies") if err != nil { return err @@ -266,6 +293,18 @@ func resourceComputeRegionTargetHttpsProxyRead(d *schema.ResourceData, meta inte return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetHttpsProxy %q", d.Id())) } + res, err = resourceComputeRegionTargetHttpsProxyDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ComputeRegionTargetHttpsProxy because it no longer exists.") + d.SetId("") + return nil + } + if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) } @@ -282,6 +321,9 @@ func resourceComputeRegionTargetHttpsProxyRead(d *schema.ResourceData, meta inte if err := d.Set("name", flattenComputeRegionTargetHttpsProxyName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) } + if err := d.Set("certificate_manager_certificates", flattenComputeRegionTargetHttpsProxyCertificateManagerCertificates(res["certificateManagerCertificates"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } if err := d.Set("ssl_certificates", flattenComputeRegionTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) } @@ -318,9 +360,15 @@ func resourceComputeRegionTargetHttpsProxyUpdate(d 
*schema.ResourceData, meta in d.Partial(true) - if d.HasChange("ssl_certificates") { + if d.HasChange("certificate_manager_certificates") || d.HasChange("ssl_certificates") { obj := make(map[string]interface{}) + certificateManagerCertificatesProp, err := expandComputeRegionTargetHttpsProxyCertificateManagerCertificates(d.Get("certificate_manager_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_manager_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateManagerCertificatesProp)) { + obj["certificateManagerCertificates"] = certificateManagerCertificatesProp + } sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) if err != nil { return err @@ -511,6 +559,10 @@ func flattenComputeRegionTargetHttpsProxyName(v interface{}, d *schema.ResourceD return v } +func flattenComputeRegionTargetHttpsProxyCertificateManagerCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeRegionTargetHttpsProxySslCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -547,6 +599,31 @@ func expandComputeRegionTargetHttpsProxyName(v interface{}, d tpgresource.Terraf return v, nil } +func expandComputeRegionTargetHttpsProxyCertificateManagerCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for certificate_manager_certificates: nil") + } + if strings.HasPrefix(raw.(string), "//") || strings.HasPrefix(raw.(string), "https://") { + // Any full URL will be passed to the API request (regardless of the resource type). 
This is to allow self_links of CertificateManagerCertificate resources. + // If the full URL is an invalid reference, that should be handled by the API. + req = append(req, raw.(string)) + } else if reg, _ := regexp.Compile("projects/(.*)/locations/(.*)/certificates/(.*)"); reg.MatchString(raw.(string)) { + // If the input is the id pattern of CertificateManagerCertificate resource, a prefix will be added to construct the full URL before constructing the API request. + self_link := "https://certificatemanager.googleapis.com/v1/" + raw.(string) + req = append(req, self_link) + } else { + return nil, fmt.Errorf("Invalid value for certificate_manager_certificates: %v is an invalid format for a certificateManagerCertificate resource", raw.(string)) + } + } + return req, nil +} + func expandComputeRegionTargetHttpsProxySslCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -586,3 +663,34 @@ func expandComputeRegionTargetHttpsProxyRegion(v interface{}, d tpgresource.Terr } return f.RelativeLink(), nil } + +func resourceComputeRegionTargetHttpsProxyEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + + if _, ok := obj["certificateManagerCertificates"]; ok { + // The field certificateManagerCertificates should not be included in the API request, and it should be renamed to `sslCertificates` + // The API does not allow using both certificate manager certificates and sslCertificates. If that changes + // in the future, the encoder logic should change accordingly because this will mean that both fields are no longer mutually exclusive. 
+ log.Printf("[DEBUG] converting the field CertificateManagerCertificates to sslCertificates before sending the request") + obj["sslCertificates"] = obj["certificateManagerCertificates"] + delete(obj, "certificateManagerCertificates") + } + return obj, nil +} + +func resourceComputeRegionTargetHttpsProxyDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Since both sslCertificates and certificateManagerCertificates map to the same API field (sslCertificates), we need to check the types + // of certificates that exist in the array and decide whether to change the field to certificateManagerCertificate or not. + // The decoder logic depends on the fact that the API does not allow mixed type of certificates and it returns + // certificate manager certificates in the format of //certificatemanager.googleapis.com/projects/*/locations/*/certificates/* + if sslCertificates, ok := res["sslCertificates"].([]interface{}); ok && len(sslCertificates) > 0 { + regPat, _ := regexp.Compile("//certificatemanager.googleapis.com/projects/(.*)/locations/(.*)/certificates/(.*)") + + if regPat.MatchString(sslCertificates[0].(string)) { + // It is enough to check only the type of one of the provided certificates because all the certificates should be the same type. 
+ log.Printf("[DEBUG] The field sslCertificates contains certificateManagerCertificates, the field name will be converted to certificateManagerCertificates") + res["certificateManagerCertificates"] = res["sslCertificates"] + delete(res, "sslCertificates") + } + } + return res, nil +} diff --git a/google/services/compute/resource_compute_region_target_https_proxy_generated_test.go b/google/services/compute/resource_compute_region_target_https_proxy_generated_test.go index edf7d24a6fe..371fac79542 100644 --- a/google/services/compute/resource_compute_region_target_https_proxy_generated_test.go +++ b/google/services/compute/resource_compute_region_target_https_proxy_generated_test.go @@ -114,6 +114,64 @@ resource "google_compute_region_health_check" "default" { `, context) } +func TestAccComputeRegionTargetHttpsProxy_regionTargetHttpsProxyCertificateManagerCertificateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_regionTargetHttpsProxyCertificateManagerCertificateExample(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ssl_policy", "url_map", "region"}, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_regionTargetHttpsProxyCertificateManagerCertificateExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_target_https_proxy" "default" { + name = "tf-test-target-http-proxy%{random_suffix}" + url_map = google_compute_region_url_map.default.id + certificate_manager_certificates = 
["//certificatemanager.googleapis.com/${google_certificate_manager_certificate.default.id}"] # [google_certificate_manager_certificate.default.id] is also acceptable +} + +resource "google_certificate_manager_certificate" "default" { + name = "tf-test-my-certificate%{random_suffix}" + location = "us-central1" + self_managed { + pem_certificate = file("test-fixtures/cert.pem") + pem_private_key = file("test-fixtures/private-key.pem") + } +} + +resource "google_compute_region_url_map" "default" { + name = "tf-test-url-map%{random_suffix}" + default_service = google_compute_region_backend_service.default.id + region = "us-central1" +} + +resource "google_compute_region_backend_service" "default" { + name = "tf-test-backend-service%{random_suffix}" + region = "us-central1" + protocol = "HTTPS" + timeout_sec = 30 + load_balancing_scheme = "INTERNAL_MANAGED" +} +`, context) +} + func testAccCheckComputeRegionTargetHttpsProxyDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/google/services/compute/resource_compute_region_target_tcp_proxy_test.go b/google/services/compute/resource_compute_region_target_tcp_proxy_test.go index a66e86cee6c..47f8a55f72c 100644 --- a/google/services/compute/resource_compute_region_target_tcp_proxy_test.go +++ b/google/services/compute/resource_compute_region_target_tcp_proxy_test.go @@ -14,9 +14,9 @@ import ( func TestAccComputeRegionTargetTcpProxy_update(t *testing.T) { t.Parallel() - target := fmt.Sprintf("trtcp-test-%s", acctest.RandString(t, 10)) - backend := fmt.Sprintf("trtcp-test-%s", acctest.RandString(t, 10)) - hc := fmt.Sprintf("trtcp-test-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { 
acctest.AccTestPreCheck(t) }, diff --git a/google/services/compute/resource_compute_service_attachment.go b/google/services/compute/resource_compute_service_attachment.go index 76b574f1d9e..5767acded90 100644 --- a/google/services/compute/resource_compute_service_attachment.go +++ b/google/services/compute/resource_compute_service_attachment.go @@ -18,6 +18,7 @@ package compute import ( + "bytes" "fmt" "log" "reflect" @@ -30,6 +31,42 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) +// Hash based on key, which is either project_id_or_num or network_url. +func computeServiceAttachmentConsumerAcceptListsHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + log.Printf("[DEBUG] hashing %v", m) + + if v, ok := m["project_id_or_num"]; ok { + if v == nil { + v = "" + } + + buf.WriteString(fmt.Sprintf("%v-", v)) + } + + if v, ok := m["network_url"]; ok { + if v == nil { + v = "" + } else { + if networkUrl, err := tpgresource.GetRelativePath(v.(string)); err != nil { + log.Printf("[WARN] Error on retrieving relative path of network url: %s", err) + } else { + v = networkUrl + } + } + + buf.WriteString(fmt.Sprintf("%v-", v)) + } + + log.Printf("[DEBUG] computed hash value of %v from %v", tpgresource.Hashcode(buf.String()), buf.String()) + return tpgresource.Hashcode(buf.String()) +} + func ResourceComputeServiceAttachment() *schema.Resource { return &schema.Resource{ Create: resourceComputeServiceAttachmentCreate, @@ -100,7 +137,7 @@ this service attachment.`, Description: `An array of projects that are allowed to connect to this service attachment.`, Elem: computeServiceAttachmentConsumerAcceptListsSchema(), - // Default schema.HashSchema is used. 
+ Set: computeServiceAttachmentConsumerAcceptListsHash, }, "consumer_reject_lists": { Type: schema.TypeList, @@ -195,11 +232,19 @@ func computeServiceAttachmentConsumerAcceptListsSchema() *schema.Resource { Required: true, Description: `The number of consumer forwarding rules the consumer project can create.`, + }, + "network_url": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The network that is allowed to connect to this service attachment. +Only one of project_id_or_num and network_url may be set.`, }, "project_id_or_num": { - Type: schema.TypeString, - Required: true, - Description: `A project that is allowed to connect to this service attachment.`, + Type: schema.TypeString, + Optional: true, + Description: `A project that is allowed to connect to this service attachment. +Only one of project_id_or_num and network_url may be set.`, }, }, } @@ -688,7 +733,7 @@ func flattenComputeServiceAttachmentConsumerAcceptLists(v interface{}, d *schema return v } l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(computeServiceAttachmentConsumerAcceptListsSchema()), []interface{}{}) + transformed := schema.NewSet(computeServiceAttachmentConsumerAcceptListsHash, []interface{}{}) for _, raw := range l { original := raw.(map[string]interface{}) if len(original) < 1 { @@ -697,6 +742,7 @@ func flattenComputeServiceAttachmentConsumerAcceptLists(v interface{}, d *schema } transformed.Add(map[string]interface{}{ "project_id_or_num": flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["projectIdOrNum"], d, config), + "network_url": flattenComputeServiceAttachmentConsumerAcceptListsNetworkUrl(original["networkUrl"], d, config), "connection_limit": flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connectionLimit"], d, config), }) } @@ -706,6 +752,10 @@ func flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interfac 
return v } +func flattenComputeServiceAttachmentConsumerAcceptListsNetworkUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { @@ -797,6 +847,13 @@ func expandComputeServiceAttachmentConsumerAcceptLists(v interface{}, d tpgresou transformed["projectIdOrNum"] = transformedProjectIdOrNum } + transformedNetworkUrl, err := expandComputeServiceAttachmentConsumerAcceptListsNetworkUrl(original["network_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkUrl"] = transformedNetworkUrl + } + transformedConnectionLimit, err := expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connection_limit"], d, config) if err != nil { return nil, err @@ -813,6 +870,10 @@ func expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface return v, nil } +func expandComputeServiceAttachmentConsumerAcceptListsNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/google/services/compute/resource_compute_service_attachment_generated_test.go b/google/services/compute/resource_compute_service_attachment_generated_test.go index ff34ea13c70..b7c3bc1cac8 100644 --- a/google/services/compute/resource_compute_service_attachment_generated_test.go +++ b/google/services/compute/resource_compute_service_attachment_generated_test.go @@ -255,6 +255,133 @@ resource 
"google_compute_subnetwork" "psc_ilb_nat" { `, context) } +func TestAccComputeServiceAttachment_serviceAttachmentExplicitNetworksExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeServiceAttachmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeServiceAttachment_serviceAttachmentExplicitNetworksExample(context), + }, + { + ResourceName: "google_compute_service_attachment.psc_ilb_service_attachment", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"target_service", "region"}, + }, + }, + }) +} + +func testAccComputeServiceAttachment_serviceAttachmentExplicitNetworksExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_service_attachment" "psc_ilb_service_attachment" { + name = "tf-test-my-psc-ilb%{random_suffix}" + region = "us-west2" + description = "A service attachment configured with Terraform" + + enable_proxy_protocol = false + + connection_preference = "ACCEPT_MANUAL" + nat_subnets = [google_compute_subnetwork.psc_ilb_nat.id] + target_service = google_compute_forwarding_rule.psc_ilb_target_service.id + + consumer_accept_lists { + network_url = google_compute_network.psc_ilb_consumer_network.self_link + connection_limit = 1 + } +} + +resource "google_compute_network" "psc_ilb_consumer_network" { + name = "tf-test-psc-ilb-consumer-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc_ilb_consumer_subnetwork" { + name = "tf-test-psc-ilb-consumer-network%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-west2" + network = google_compute_network.psc_ilb_consumer_network.id +} + +resource "google_compute_address" 
"psc_ilb_consumer_address" { + name = "tf-test-psc-ilb-consumer-address%{random_suffix}" + region = "us-west2" + + subnetwork = google_compute_subnetwork.psc_ilb_consumer_subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_forwarding_rule" "psc_ilb_consumer" { + name = "tf-test-psc-ilb-consumer-forwarding-rule%{random_suffix}" + region = "us-west2" + + target = google_compute_service_attachment.psc_ilb_service_attachment.id + load_balancing_scheme = "" # need to override EXTERNAL default when target is a service attachment + network = google_compute_network.psc_ilb_consumer_network.id + subnetwork = google_compute_subnetwork.psc_ilb_consumer_subnetwork.id + ip_address = google_compute_address.psc_ilb_consumer_address.id +} + +resource "google_compute_forwarding_rule" "psc_ilb_target_service" { + name = "tf-test-producer-forwarding-rule%{random_suffix}" + region = "us-west2" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.producer_service_backend.id + all_ports = true + network = google_compute_network.psc_ilb_network.name + subnetwork = google_compute_subnetwork.psc_ilb_producer_subnetwork.name +} + +resource "google_compute_region_backend_service" "producer_service_backend" { + name = "tf-test-producer-service%{random_suffix}" + region = "us-west2" + + health_checks = [google_compute_health_check.producer_service_health_check.id] +} + +resource "google_compute_health_check" "producer_service_health_check" { + name = "tf-test-producer-service-health-check%{random_suffix}" + + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "80" + } +} + +resource "google_compute_network" "psc_ilb_network" { + name = "tf-test-psc-ilb-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc_ilb_producer_subnetwork" { + name = "tf-test-psc-ilb-producer-subnetwork%{random_suffix}" + region = "us-west2" + + network = 
google_compute_network.psc_ilb_network.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_subnetwork" "psc_ilb_nat" { + name = "tf-test-psc-ilb-nat%{random_suffix}" + region = "us-west2" + + network = google_compute_network.psc_ilb_network.id + purpose = "PRIVATE_SERVICE_CONNECT" + ip_cidr_range = "10.1.0.0/16" +} +`, context) +} + func TestAccComputeServiceAttachment_serviceAttachmentReconcileConnectionsExample(t *testing.T) { t.Parallel() diff --git a/google/services/container/node_config.go b/google/services/container/node_config.go index 8112c0154f2..2c168f55b3e 100644 --- a/google/services/container/node_config.go +++ b/google/services/container/node_config.go @@ -630,6 +630,12 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, }, + "enable_confidential_storage": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If enabled boot disks are configured with confidential mode.`, + }, }, }, } @@ -896,6 +902,10 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { nc.SoleTenantConfig = expandSoleTenantConfig(v) } + if v, ok := nodeConfig["enable_confidential_storage"]; ok { + nc.EnableConfidentialStorage = v.(bool) + } + if v, ok := nodeConfig["confidential_nodes"]; ok { nc.ConfidentialNodes = expandConfidentialNodes(v) } @@ -1113,6 +1123,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte "sole_tenant_config": flattenSoleTenantConfig(c.SoleTenantConfig), "fast_socket": flattenFastSocket(c.FastSocket), "resource_manager_tags": flattenResourceManagerTags(c.ResourceManagerTags), + "enable_confidential_storage": c.EnableConfidentialStorage, }) if len(c.OauthScopes) > 0 { diff --git 
a/google/services/container/resource_container_cluster_test.go b/google/services/container/resource_container_cluster_test.go index 445fc48a3a7..7b2be508f13 100644 --- a/google/services/container/resource_container_cluster_test.go +++ b/google/services/container/resource_container_cluster_test.go @@ -3691,7 +3691,7 @@ func TestAccContainerCluster_withGatewayApiConfig(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccContainerCluster_withGatewayApiConfig(clusterName, "CANARY", networkName, subnetworkName), - ExpectError: regexp.MustCompile(`expected gateway_api_config\.0\.channel to be one of \[CHANNEL_DISABLED CHANNEL_EXPERIMENTAL CHANNEL_STANDARD\], got CANARY`), + ExpectError: regexp.MustCompile(`expected gateway_api_config\.0\.channel to be one of [^,]+, got CANARY`), }, { Config: testAccContainerCluster_withGatewayApiConfig(clusterName, "CHANNEL_DISABLED", networkName, subnetworkName), @@ -8234,6 +8234,175 @@ func testAccContainerCluster_additional_pod_ranges_config(name string, nameCount `, name, name, name, aprc) } +func TestAccContainerCluster_withConfidentialBootDisk(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-node-pool-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerCluster_withConfidentialBootDisk(clusterName, npName, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withConfidentialBootDisk(clusterName, npName, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" +} + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + machine_type = "n2-standard-2" + enable_confidential_storage = true + disk_type = "hyperdisk-balanced" + } +} + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, kmsKeyName, networkName, subnetworkName) +} + +func TestAccContainerCluster_withConfidentialBootDiskNodeConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerCluster_withConfidentialBootDiskNodeConfig(clusterName, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_confidential_boot_disk_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withConfidentialBootDiskNodeConfig(clusterName, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_confidential_boot_disk_node_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + release_channel { + channel = "RAPID" + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + machine_type = "n2-standard-2" + enable_confidential_storage = true + disk_type = "hyperdisk-balanced" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, kmsKeyName, networkName, subnetworkName) +} + +func TestAccContainerCluster_withoutConfidentialBootDisk(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withoutConfidentialBootDisk(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.without_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +func testAccContainerCluster_withoutConfidentialBootDisk(clusterName, npName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "without_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" + } + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + machine_type = "n2-standard-2" + enable_confidential_storage = false + disk_type = "pd-balanced" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, networkName, subnetworkName) +} + func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { return fmt.Sprintf(` data "google_project" "project" { diff --git a/google/services/container/resource_container_node_pool_test.go b/google/services/container/resource_container_node_pool_test.go index e13b16fcd7a..e11f999729b 100644 --- a/google/services/container/resource_container_node_pool_test.go +++ b/google/services/container/resource_container_node_pool_test.go @@ -3643,6 +3643,132 @@ resource "google_container_node_pool" "with_tpu_topology" { `, cluster, networkName, subnetworkName, np1, np2, tpuTopology) } +func TestAccContainerNodePool_withConfidentialBootDisk(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + 
t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withConfidentialBootDisk(cluster, np, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withConfidentialBootDisk(cluster, np string, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + + node_config { + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + enable_confidential_storage = true + machine_type = "n2-standard-2" + disk_type = "hyperdisk-balanced" + } +} +`, cluster, networkName, subnetworkName, np, kmsKeyName) +} + +func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := 
acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.without_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "without_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + + node_config { + image_type = "COS_CONTAINERD" + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + enable_confidential_storage = false + machine_type = "n2-standard-2" + disk_type = "pd-balanced" + } +} +`, cluster, networkName, subnetworkName, np) +} + func testAccContainerNodePool_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { return fmt.Sprintf(` data "google_project" "project" { diff --git a/google/services/datapipeline/resource_data_pipeline_pipeline_generated_test.go b/google/services/datapipeline/resource_data_pipeline_pipeline_generated_test.go index c745596ca5b..a31646cb864 100644 --- 
a/google/services/datapipeline/resource_data_pipeline_pipeline_generated_test.go +++ b/google/services/datapipeline/resource_data_pipeline_pipeline_generated_test.go @@ -49,7 +49,7 @@ func TestAccDataPipelinePipeline_dataPipelinePipelineExample(t *testing.T) { ResourceName: "google_data_pipeline_pipeline.primary", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"region"}, + ImportStateVerifyIgnore: []string{"region", "schedule_info.0.next_job_time"}, }, }, }) diff --git a/google/services/firebaseappcheck/resource_firebase_app_check_device_check_config.go b/google/services/firebaseappcheck/resource_firebase_app_check_device_check_config.go new file mode 100644 index 00000000000..1dd28b7b448 --- /dev/null +++ b/google/services/firebaseappcheck/resource_firebase_app_check_device_check_config.go @@ -0,0 +1,379 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package firebaseappcheck + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFirebaseAppCheckDeviceCheckConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceFirebaseAppCheckDeviceCheckConfigCreate, + Read: resourceFirebaseAppCheckDeviceCheckConfigRead, + Update: resourceFirebaseAppCheckDeviceCheckConfigUpdate, + Delete: resourceFirebaseAppCheckDeviceCheckConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirebaseAppCheckDeviceCheckConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "app_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of an +[Apple App](https://firebase.google.com/docs/reference/firebase-management/rest/v1beta1/projects.iosApps#IosApp.FIELDS.app_id).`, + }, + "key_id": { + Type: schema.TypeString, + Required: true, + Description: `The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account.`, + }, + "private_key": { + Type: schema.TypeString, + Required: true, + Description: `The contents of the private key (.p8) file associated with the key specified by keyId.`, + Sensitive: true, + }, + "token_ttl": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Specifies the duration for which App Check tokens exchanged from 
DeviceCheck artifacts will be valid. +If unset, a default value of 1 hour is assumed. Must be between 30 minutes and 7 days, inclusive. + +A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The relative resource name of the DeviceCheck configuration object`, + }, + "private_key_set": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the privateKey field was previously set. Since App Check will never return the +privateKey field, this field is the only way to find out whether it was previously set.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFirebaseAppCheckDeviceCheckConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + tokenTtlProp, err := expandFirebaseAppCheckDeviceCheckConfigTokenTtl(d.Get("token_ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("token_ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(tokenTtlProp)) && (ok || !reflect.DeepEqual(v, tokenTtlProp)) { + obj["tokenTtl"] = tokenTtlProp + } + keyIdProp, err := expandFirebaseAppCheckDeviceCheckConfigKeyId(d.Get("key_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyIdProp)) && (ok || !reflect.DeepEqual(v, keyIdProp)) { + obj["keyId"] = keyIdProp + } + privateKeyProp, err := expandFirebaseAppCheckDeviceCheckConfigPrivateKey(d.Get("private_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateKeyProp)) && (ok || !reflect.DeepEqual(v, 
privateKeyProp)) { + obj["privateKey"] = privateKeyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirebaseAppCheckBasePath}}projects/{{project}}/apps/{{app_id}}/deviceCheckConfig?updateMask=tokenTtl,keyId,privateKey") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DeviceCheckConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DeviceCheckConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DeviceCheckConfig: %s", err) + } + if err := d.Set("name", flattenFirebaseAppCheckDeviceCheckConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/apps/{{app_id}}/deviceCheckConfig") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DeviceCheckConfig %q: %#v", d.Id(), res) + + return resourceFirebaseAppCheckDeviceCheckConfigRead(d, meta) +} + +func resourceFirebaseAppCheckDeviceCheckConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirebaseAppCheckBasePath}}projects/{{project}}/apps/{{app_id}}/deviceCheckConfig") + if err 
!= nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DeviceCheckConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FirebaseAppCheckDeviceCheckConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DeviceCheckConfig: %s", err) + } + + if err := d.Set("name", flattenFirebaseAppCheckDeviceCheckConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceCheckConfig: %s", err) + } + if err := d.Set("token_ttl", flattenFirebaseAppCheckDeviceCheckConfigTokenTtl(res["tokenTtl"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceCheckConfig: %s", err) + } + if err := d.Set("key_id", flattenFirebaseAppCheckDeviceCheckConfigKeyId(res["keyId"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceCheckConfig: %s", err) + } + if err := d.Set("private_key_set", flattenFirebaseAppCheckDeviceCheckConfigPrivateKeySet(res["privateKeySet"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceCheckConfig: %s", err) + } + + return nil +} + +func resourceFirebaseAppCheckDeviceCheckConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error 
fetching project for DeviceCheckConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + tokenTtlProp, err := expandFirebaseAppCheckDeviceCheckConfigTokenTtl(d.Get("token_ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("token_ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tokenTtlProp)) { + obj["tokenTtl"] = tokenTtlProp + } + keyIdProp, err := expandFirebaseAppCheckDeviceCheckConfigKeyId(d.Get("key_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, keyIdProp)) { + obj["keyId"] = keyIdProp + } + privateKeyProp, err := expandFirebaseAppCheckDeviceCheckConfigPrivateKey(d.Get("private_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateKeyProp)) { + obj["privateKey"] = privateKeyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirebaseAppCheckBasePath}}projects/{{project}}/apps/{{app_id}}/deviceCheckConfig") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DeviceCheckConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("token_ttl") { + updateMask = append(updateMask, "tokenTtl") + } + + if d.HasChange("key_id") { + updateMask = append(updateMask, "keyId") + } + + if d.HasChange("private_key") { + updateMask = append(updateMask, "privateKey") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if 
updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DeviceCheckConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DeviceCheckConfig %q: %#v", d.Id(), res) + } + + } + + return resourceFirebaseAppCheckDeviceCheckConfigRead(d, meta) +} + +func resourceFirebaseAppCheckDeviceCheckConfigDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] FirebaseAppCheck DeviceCheckConfig resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceFirebaseAppCheckDeviceCheckConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/apps/(?P[^/]+)/deviceCheckConfig$", + "^(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/apps/{{app_id}}/deviceCheckConfig") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenFirebaseAppCheckDeviceCheckConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirebaseAppCheckDeviceCheckConfigTokenTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirebaseAppCheckDeviceCheckConfigKeyId(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirebaseAppCheckDeviceCheckConfigPrivateKeySet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFirebaseAppCheckDeviceCheckConfigTokenTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirebaseAppCheckDeviceCheckConfigKeyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirebaseAppCheckDeviceCheckConfigPrivateKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google/services/firebaseappcheck/resource_firebase_app_check_device_check_config_test.go b/google/services/firebaseappcheck/resource_firebase_app_check_device_check_config_test.go new file mode 100644 index 00000000000..d75b76cbd65 --- /dev/null +++ b/google/services/firebaseappcheck/resource_firebase_app_check_device_check_config_test.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package firebaseappcheck_test diff --git a/google/services/firebaseappcheck/test-fixtures/private-key-2.p8 b/google/services/firebaseappcheck/test-fixtures/private-key-2.p8 new file mode 100644 index 00000000000..f581bea0e05 --- /dev/null +++ b/google/services/firebaseappcheck/test-fixtures/private-key-2.p8 @@ -0,0 +1,15 @@ +-----BEGIN PRIVATE KEY----- +MIICWwIBAAKBgQCVA/2LQtUYJI8KlNHWzNPzGzVv01qavSbmuW0QYjshxRnXDBk+ +fWZePJAmsyuhU4Y2SkM5Wqvgjo/rDPaRPdTiEtKQuNesRgQeOVmAWDkIXEiieTwb +RYuXbdpZhH86Vt6xOMt14tGPKE5VuuySvTqgQRCvRTylrF3koBc0d/8NVQIDAQAB +AoGAG7qBXH+ULYjoAR0OKv00V2FxwRxAGNknuvk4HTtaK3+Evmpm7CTjfpegb0MZ +1Ew5hjKtbae8oe2FRETGQOKTkS68I/D9PGP4aTzmSkf6PjwXwhlBYp09xxv4nmxV +BCbsoicNMvdk0F7SPblnZBO9i0DpZ8pT9wyPo8QzWBfi5IECQQD8gIOja3Zim4R9 +HVL7Blvhzhl2ibuITV2PKfQ11v0a+Om+rZKwdrhxKgWoguDvvP7ExWSPTZJKSm0J +bzhU+APhAkEAlxR3fY+zSpxHaxbOqZ6cea5cZtyHcX607nW8N037yBErIjcJKL65 +gHx9Vq1Xo24o4C6kyzmh00BnkyXul4439QJAPWvtmaUcaSQ3eE/XzaRgWW9PFlyu +t5tKNPcZprcjXppKEc4bLr3SZAS616DuoqKwvqDds1ZFTbkJCRB6/YBPQQJAeyGG +JYKJyKRIHMJw2pNXymBOFNNlXB29jp/ML3LSYwODGRar01ZmT46mhI8wXxV4IQZC +7xLgjhDumWIP69tQRQJAfuOy4TP2drxNI7fP7YenV1ks6CiLHcBN04f6NItWilTN +Cc+Mv/rio9xO56Yp9oePMaFT9QEzfO/cqX6QvyfblQ== +-----END PRIVATE KEY----- \ No newline at end of file diff --git a/google/services/firebaseappcheck/test-fixtures/private-key.p8 b/google/services/firebaseappcheck/test-fixtures/private-key.p8 new file mode 100644 index 00000000000..d48a562a97b --- /dev/null +++ b/google/services/firebaseappcheck/test-fixtures/private-key.p8 @@ -0,0 +1,15 @@ +-----BEGIN PRIVATE KEY----- +MIICXAIBAAKBgG3vDegwb8uUvns/Iuodo/cNK0eMHxqb+2n16dQnxL7az+ShNWKQ +jTSzXY5y4VexrTdPEU5ZiTPONZXyl4/iFvOnyFxnC6Zjyr+xeIU5X4TmjYq0yCuZ +xbovAWw+E4KUKt1V62avd+hGZHPtCKLfV/uYITG7I8R+GyEAdMoaXP8JAgMBAAEC +gYBsQFf7aabMWx3uks84phNNNOSHQY1nUG2Te5nTVY3BOgo44h2Ffz3sxSq9GJaZ +GdatfehWtIgMQWQ20Xk5L7LUzSxmndHbUIzYU17xZrAsgmjYTwvAQ13If2L6S+pz 
+EUbTLkMnlbAgvtJ2AqZZZ3LE41N9ey60gVB1cCu9fCXLuQJBANAeoDXXvh7nXdyN +Zd84zXpSk8SLmAmQn1JB7z3oPkrZ0dG42GMOzYw9MP8n2bATHV+OB0/gdUFJAYYp +kwz+bJ8CQQCHObHelAlkH3or+JVECpovNMHs2UGB6yF1ZX0Nep3iR90fhi3BsnVo +IQGdHlQC2NL+iaBF4Mv2/dfZTen1vMtXAkEAk7+KQW8+G7ZpXjBLyCMNTO/e08O+ +VdwEH2OLsslzn7PvTxIJHJnfttWiOSJTWrrXOYUdD8nrtENd/574NFtTRQJAaExD +uJ0NsT/mB0wwNM7IpWhXusrHD+G/aMDidyb/56vuDYZ8fE2c6LesevcNbTS3aMPV +7o+4QcUAWwcRUQxQ+QJBAJEAwwzFnLJtrFYEnz7YNufgjiMrX7CBJCwrXGZpZrHX +EdDDOGiLrm871hc3tNQWmzou9AFIwZFeIOXVdIHIQzk= +-----END PRIVATE KEY----- \ No newline at end of file diff --git a/google/services/firestore/resource_firestore_field_generated_test.go b/google/services/firestore/resource_firestore_field_generated_test.go index bea60779b28..f5ebb41935e 100644 --- a/google/services/firestore/resource_firestore_field_generated_test.go +++ b/google/services/firestore/resource_firestore_field_generated_test.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "google.golang.org/api/googleapi" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -231,6 +232,15 @@ func testAccCheckFirestoreFieldDestroyProducer(t *testing.T) func(s *terraform.S UserAgent: config.UserAgent, }) if err != nil { + e := err.(*googleapi.Error) + if e.Code == 403 && strings.Contains(e.Message, "Cloud Firestore API has not been used in project") { + // The acceptance test has provisioned the resources under test in a new project, and the destory check is seeing the + // effects of the project not existing. This means the service isn't enabled, and that the resource is definitely destroyed. 
+ // We do not return the error in this case - destroy was successful + return nil + } + + // Return err in all other cases return err } diff --git a/google/services/firestore/resource_firestore_field_test.go b/google/services/firestore/resource_firestore_field_test.go index 3d431c80345..abcc602a052 100644 --- a/google/services/firestore/resource_firestore_field_test.go +++ b/google/services/firestore/resource_firestore_field_test.go @@ -105,7 +105,11 @@ resource "google_firestore_database" "database" { location_id = "nam5" type = "FIRESTORE_NATIVE" - depends_on = [google_project_service.firestore] + # used to control delete order + depends_on = [ + google_project_service.firestore, + google_project.project + ] } `, context) } else { @@ -117,7 +121,7 @@ resource "google_firestore_database" "database" { type = "FIRESTORE_NATIVE" delete_protection_state = "DELETE_PROTECTION_DISABLED" - deletion_policy = "DELETE" + deletion_policy = "DELETE" } `, context) } diff --git a/google/services/gkebackup/resource_gke_backup_backup_plan_sweeper.go b/google/services/gkebackup/resource_gke_backup_backup_plan_sweeper.go new file mode 100644 index 00000000000..f29d5fe877a --- /dev/null +++ b/google/services/gkebackup/resource_gke_backup_backup_plan_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkebackup + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("GKEBackupBackupPlan", testSweepGKEBackupBackupPlan) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepGKEBackupBackupPlan(region string) error { + resourceName := "GKEBackupBackupPlan" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://gkebackup.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupPlans", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["backupPlans"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://gkebackup.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backupPlans/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/gkebackup/resource_gke_backup_restore_plan_sweeper.go b/google/services/gkebackup/resource_gke_backup_restore_plan_sweeper.go new file mode 100644 index 00000000000..489027abd53 --- 
/dev/null +++ b/google/services/gkebackup/resource_gke_backup_restore_plan_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkebackup + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("GKEBackupRestorePlan", testSweepGKEBackupRestorePlan) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepGKEBackupRestorePlan(region string) error { + resourceName := "GKEBackupRestorePlan" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + 
"location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://gkebackup.googleapis.com/v1/projects/{{project}}/locations/{{location}}/restorePlans", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["restorePlans"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://gkebackup.googleapis.com/v1/projects/{{project}}/locations/{{location}}/restorePlans/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/gkehub/resource_gke_hub_feature_membership.go b/google/services/gkehub/resource_gke_hub_feature_membership.go index 57a73388f74..1fdce92b3dd 100644 --- a/google/services/gkehub/resource_gke_hub_feature_membership.go +++ b/google/services/gkehub/resource_gke_hub_feature_membership.go @@ -461,6 +461,15 @@ func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigSchema() *s Description: "The maximum number of audit violations to be stored in a constraint. 
If not set, the internal default of 20 will be used.", }, + "deployment_configs": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: "Map of deployment configs to deployments (\"admission\", \"audit\", \"mutation\").", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema(), + Set: schema.HashResource(GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema()), + }, + "exemptable_namespaces": { Type: schema.TypeList, Optional: true, @@ -513,6 +522,133 @@ func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigSchema() *s } } +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "container_resources": { + Type: schema.TypeList, + Optional: true, + Description: "Container resource requirements.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSchema(), + }, + + "pod_affinity": { + Type: schema.TypeString, + Optional: true, + Description: "Pod affinity configuration. 
Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY", + }, + + "pod_tolerations": { + Type: schema.TypeList, + Optional: true, + Description: "Pod tolerations of node taints.", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSchema(), + }, + + "replica_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Pod replica count.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeList, + Optional: true, + Description: "Limits describes the maximum amount of compute resources allowed for use by the running container.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSchema(), + }, + + "requests": { + Type: schema.TypeList, + Optional: true, + Description: "Requests describes the amount of compute resources reserved for the container by the kube-scheduler.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSchema(), + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeString, + Optional: true, + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + + "memory": { + Type: schema.TypeString, + Optional: true, + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + 
Type: schema.TypeString, + Optional: true, + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + + "memory": { + Type: schema.TypeString, + Optional: true, + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint effect.", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint key (not necessarily unique).", + }, + + "operator": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint operator.", + }, + + "value": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint value.", + }, + }, + } +} + func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -530,6 +666,14 @@ func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringS func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ + "bundles": { + Type: schema.TypeSet, + Optional: true, + Description: "map of bundle name to BundleInstallSpec. 
The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint.", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema(), + Set: schema.HashResource(GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema()), + }, + "template_library": { Type: schema.TypeList, Computed: true, @@ -542,6 +686,25 @@ func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyConte } } +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bundle_name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "exempted_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces to be exempted from the bundle.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -1141,6 +1304,7 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(o in return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig{ AuditIntervalSeconds: dcl.Int64(int64(obj["audit_interval_seconds"].(int))), ConstraintViolationLimit: dcl.Int64(int64(obj["constraint_violation_limit"].(int))), + DeploymentConfigs: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj["deployment_configs"]), ExemptableNamespaces: tpgdclresource.ExpandStringArray(obj["exemptable_namespaces"]), InstallSpec: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(obj["install_spec"].(string)), LogDeniesEnabled: 
dcl.Bool(obj["log_denies_enabled"].(bool)), @@ -1158,6 +1322,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj transformed := map[string]interface{}{ "audit_interval_seconds": obj.AuditIntervalSeconds, "constraint_violation_limit": obj.ConstraintViolationLimit, + "deployment_configs": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj.DeploymentConfigs), "exemptable_namespaces": obj.ExemptableNamespaces, "install_spec": obj.InstallSpec, "log_denies_enabled": obj.LogDeniesEnabled, @@ -1171,6 +1336,219 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj } +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(o interface{}) map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if o == nil { + return nil + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(item) + if item != nil { + items[item.(map[string]interface{})["component_name"].(string)] = *i + } + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{ + ContainerResources: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj["container_resources"]), + PodAffinity: 
gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(obj["pod_affinity"].(string)), + PodTolerations: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj["pod_tolerations"]), + ReplicaCount: dcl.Int64(int64(obj["replica_count"].(int))), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(objs map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&item, name) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "container_resources": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj.ContainerResources), + "pod_affinity": obj.PodAffinity, + "pod_tolerations": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj.PodTolerations), + "replica_count": obj.ReplicaCount, + } + + transformed["component_name"] = name + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] 
== nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{ + Limits: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj["limits"]), + Requests: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj["requests"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "limits": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj.Limits), + "requests": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj.Requests), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{ + Cpu: 
dcl.String(obj["cpu"].(string)), + Memory: dcl.String(obj["memory"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu": obj.Cpu, + "memory": obj.Memory, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{ + Cpu: dcl.String(obj["cpu"].(string)), + Memory: dcl.String(obj["memory"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu": obj.Cpu, + "memory": obj.Memory, + } + + return []interface{}{transformed} + +} +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(o interface{}) 
[]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if o == nil { + return make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + } + + items := make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(item) + items = append(items, *i) + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{ + Effect: dcl.String(obj["effect"].(string)), + Key: dcl.String(obj["key"].(string)), + Operator: dcl.String(obj["operator"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(objs []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&item) + items = append(items, i) + } + + return items +} + +func 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "effect": obj.Effect, + "key": obj.Key, + "operator": obj.Operator, + "value": obj.Value, + } + + return transformed + +} + func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { if o == nil { return nil @@ -1207,6 +1585,7 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolic } obj := objArr[0].(map[string]interface{}) return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{ + Bundles: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj["bundles"]), TemplateLibrary: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj["template_library"]), } } @@ -1216,6 +1595,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPoli return nil } transformed := map[string]interface{}{ + "bundles": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj.Bundles), "template_library": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj.TemplateLibrary), } @@ -1223,6 +1603,68 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPoli } +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(o interface{}) map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if o == nil { + return 
make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + } + + items := make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(item) + if item != nil { + items[item.(map[string]interface{})["bundle_name"].(string)] = *i + } + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{ + ExemptedNamespaces: tpgdclresource.ExpandStringArray(obj["exempted_namespaces"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(objs map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&item, name) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, name string) interface{} { + if obj == nil { + return nil + } + transformed := 
map[string]interface{}{ + "exempted_namespaces": obj.ExemptedNamespaces, + } + + transformed["bundle_name"] = name + + return transformed + +} + func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { if o == nil { return nil diff --git a/google/services/gkehub/resource_gke_hub_feature_membership_test.go b/google/services/gkehub/resource_gke_hub_feature_membership_test.go index 75c1f3f707d..bb7da9d7538 100644 --- a/google/services/gkehub/resource_gke_hub_feature_membership_test.go +++ b/google/services/gkehub/resource_gke_hub_feature_membership_test.go @@ -991,6 +991,17 @@ func TestAccGKEHubFeatureMembership_gkehubFeaturePolicyController(t *testing.T) ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGKEHubFeatureMembership_policycontrollerUpdateMaps(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "policycontroller", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1047,9 +1058,92 @@ resource "google_gke_hub_feature_membership" "feature_member" { "PROMETHEUS" ] } + deployment_configs { + component_name = "admission" + replica_count = 3 + pod_affinity = "ANTI_AFFINITY" + container_resources { + limits { + memory = "1Gi" + cpu = "1.5" + } + requests { + memory = "500Mi" + cpu = "150m" + } + } + pod_tolerations { + key = "key1" + operator = "Equal" + value = "value1" + effect = "NoSchedule" + } + } + deployment_configs { + component_name = "mutation" + replica_count = 3 + pod_affinity = "ANTI_AFFINITY" + } policy_content { template_library { - installation = "NOT_INSTALLED" + installation = "ALL" + } + bundles { + bundle_name = 
"pci-dss-v3.2.1" + exempted_namespaces = ["sample-namespace"] + } + bundles { + bundle_name = "nist-sp-800-190" + } + } + } + version = "1.17.0" + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_policycontrollerUpdateMaps(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "policycontroller" + location = "global" + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.poco] +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_SUSPENDED" + constraint_violation_limit = 50 + referential_rules_enabled = true + log_denies_enabled = true + mutation_enabled = true + monitoring { + backends = [ + "PROMETHEUS" + ] + } + deployment_configs { + component_name = "admission" + pod_affinity = "NO_AFFINITY" + } + deployment_configs { + component_name = "audit" + container_resources { + limits { + memory = "1Gi" + cpu = "1.5" + } + requests { + memory = "500Mi" + cpu = "150m" + } } } } diff --git a/google/services/gkehub2/iam_gke_hub_scope_generated_test.go b/google/services/gkehub2/iam_gke_hub_scope_generated_test.go index ee62301898e..1ffea5d419b 100644 --- a/google/services/gkehub2/iam_gke_hub_scope_generated_test.go +++ b/google/services/gkehub2/iam_gke_hub_scope_generated_test.go @@ -130,6 +130,11 @@ func testAccGKEHub2ScopeIamMember_basicGenerated(context map[string]interface{}) return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-my-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = 
"valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" @@ -150,6 +155,11 @@ func testAccGKEHub2ScopeIamPolicy_basicGenerated(context map[string]interface{}) return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-my-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" @@ -184,6 +194,11 @@ func testAccGKEHub2ScopeIamPolicy_emptyBinding(context map[string]interface{}) s return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-my-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" @@ -206,6 +221,11 @@ func testAccGKEHub2ScopeIamBinding_basicGenerated(context map[string]interface{} return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-my-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" @@ -226,6 +246,11 @@ func testAccGKEHub2ScopeIamBinding_updateGenerated(context map[string]interface{ return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-my-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" diff --git a/google/services/gkehub2/resource_gke_hub_scope.go b/google/services/gkehub2/resource_gke_hub_scope.go index 7685840bb87..5e1d1b73411 100644 --- a/google/services/gkehub2/resource_gke_hub_scope.go +++ b/google/services/gkehub2/resource_gke_hub_scope.go @@ -70,6 +70,16 @@ func ResourceGKEHub2Scope() *schema.Resource { Please refer to the field 'effective_labels' for all of the labels present on the resource.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + "namespace_labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Scope-level 
cluster namespace labels. For the member clusters bound +to the Scope, these labels are applied to each namespace under the +Scope. Scope-level labels take precedence over Namespace-level +labels ('namespace_labels' in the Fleet Namespace resource) if they +share a key. Keys and values must be Kubernetes-conformant.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "create_time": { Type: schema.TypeString, Computed: true, @@ -141,6 +151,12 @@ func resourceGKEHub2ScopeCreate(d *schema.ResourceData, meta interface{}) error } obj := make(map[string]interface{}) + namespaceLabelsProp, err := expandGKEHub2ScopeNamespaceLabels(d.Get("namespace_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("namespace_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(namespaceLabelsProp)) && (ok || !reflect.DeepEqual(v, namespaceLabelsProp)) { + obj["namespaceLabels"] = namespaceLabelsProp + } labelsProp, err := expandGKEHub2ScopeEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -274,6 +290,9 @@ func resourceGKEHub2ScopeRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("state", flattenGKEHub2ScopeState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading Scope: %s", err) } + if err := d.Set("namespace_labels", flattenGKEHub2ScopeNamespaceLabels(res["namespaceLabels"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } if err := d.Set("labels", flattenGKEHub2ScopeLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Scope: %s", err) } @@ -303,6 +322,12 @@ func resourceGKEHub2ScopeUpdate(d *schema.ResourceData, meta interface{}) error billingProject = project obj := make(map[string]interface{}) + namespaceLabelsProp, err := expandGKEHub2ScopeNamespaceLabels(d.Get("namespace_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("namespace_labels"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, namespaceLabelsProp)) { + obj["namespaceLabels"] = namespaceLabelsProp + } labelsProp, err := expandGKEHub2ScopeEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -318,6 +343,10 @@ func resourceGKEHub2ScopeUpdate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Updating Scope %q: %#v", d.Id(), obj) updateMask := []string{} + if d.HasChange("namespace_labels") { + updateMask = append(updateMask, "namespaceLabels") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -473,6 +502,10 @@ func flattenGKEHub2ScopeStateCode(v interface{}, d *schema.ResourceData, config return v } +func flattenGKEHub2ScopeNamespaceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenGKEHub2ScopeLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -507,6 +540,17 @@ func flattenGKEHub2ScopeEffectiveLabels(v interface{}, d *schema.ResourceData, c return v } +func expandGKEHub2ScopeNamespaceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + func expandGKEHub2ScopeEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/google/services/gkehub2/resource_gke_hub_scope_generated_test.go b/google/services/gkehub2/resource_gke_hub_scope_generated_test.go index 92d2fe6d434..5aa2b0be7bc 100644 --- a/google/services/gkehub2/resource_gke_hub_scope_generated_test.go +++ b/google/services/gkehub2/resource_gke_hub_scope_generated_test.go @@ -61,6 
+61,11 @@ func testAccGKEHub2Scope_gkehubScopeBasicExample(context map[string]interface{}) return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-my-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" diff --git a/google/services/gkehub2/resource_gke_hub_scope_test.go b/google/services/gkehub2/resource_gke_hub_scope_test.go index 30dc3e4111e..9112da64e7d 100644 --- a/google/services/gkehub2/resource_gke_hub_scope_test.go +++ b/google/services/gkehub2/resource_gke_hub_scope_test.go @@ -49,6 +49,11 @@ func testAccGKEHub2Scope_gkehubScopeBasicExample_basic(context map[string]interf return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-scope%{random_suffix}" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" @@ -62,6 +67,11 @@ func testAccGKEHub2Scope_gkehubScopeBasicExample_update(context map[string]inter return acctest.Nprintf(` resource "google_gke_hub_scope" "scope" { scope_id = "tf-test-scope%{random_suffix}" + namespace_labels = { + updated_keyb = "updated_valueb" + updated_keya = "updated_valuea" + updated_keyc = "updated_valuec" + } labels = { updated_keyb = "updated_valueb" updated_keya = "updated_valuea" diff --git a/google/services/gkeonprem/resource_gkeonprem_vmware_cluster.go b/google/services/gkeonprem/resource_gkeonprem_vmware_cluster.go index 1307678f107..3c0271e78df 100644 --- a/google/services/gkeonprem/resource_gkeonprem_vmware_cluster.go +++ b/google/services/gkeonprem/resource_gkeonprem_vmware_cluster.go @@ -591,6 +591,8 @@ used for VMware user clusters.`, "vcenter_network": { Type: schema.TypeString, Computed: true, + Optional: true, + ForceNew: true, Description: `vcenter_network specifies vCenter network name. 
Inherited from the admin cluster.`, }, }, diff --git a/google/services/gkeonprem/resource_gkeonprem_vmware_cluster_generated_test.go b/google/services/gkeonprem/resource_gkeonprem_vmware_cluster_generated_test.go index 28d61275d0a..705280090be 100644 --- a/google/services/gkeonprem/resource_gkeonprem_vmware_cluster_generated_test.go +++ b/google/services/gkeonprem/resource_gkeonprem_vmware_cluster_generated_test.go @@ -150,6 +150,7 @@ resource "google_gkeonprem_vmware_cluster" "cluster-f5lb" { gateway="test-gateway" } } + vcenter_network = "test-vcenter-network" } control_plane_node { cpus = 4 diff --git a/google/services/iap/iam_iap_app_engine_service_generated_test.go b/google/services/iap/iam_iap_app_engine_service_generated_test.go index 4f02ba935b2..679d714975e 100644 --- a/google/services/iap/iam_iap_app_engine_service_generated_test.go +++ b/google/services/iap/iam_iap_app_engine_service_generated_test.go @@ -400,7 +400,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -470,7 +470,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -555,7 +555,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. 
@@ -627,7 +627,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -697,7 +697,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -767,7 +767,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -842,7 +842,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -939,7 +939,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. @@ -1014,7 +1014,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. 
@@ -1111,7 +1111,7 @@ resource "google_app_engine_standard_app_version" "version" { project = google_app_engine_application.app.project version_id = "v2" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = true // TODO: Removed basic scaling once automatic_scaling refresh behavior is fixed. diff --git a/google/services/iap/iam_iap_app_engine_version_generated_test.go b/google/services/iap/iam_iap_app_engine_version_generated_test.go index 8759040f250..553bf97bbc7 100644 --- a/google/services/iap/iam_iap_app_engine_version_generated_test.go +++ b/google/services/iap/iam_iap_app_engine_version_generated_test.go @@ -344,7 +344,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -387,7 +387,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -446,7 +446,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -491,7 +491,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -534,7 +534,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -577,7 
+577,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -625,7 +625,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -697,7 +697,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -745,7 +745,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { @@ -817,7 +817,7 @@ resource "google_storage_bucket_object" "object" { resource "google_app_engine_standard_app_version" "version" { version_id = "%{random_suffix}" service = "default" - runtime = "nodejs10" + runtime = "nodejs20" noop_on_destroy = false entrypoint { diff --git a/google/services/kms/resource_kms_crypto_key_test.go b/google/services/kms/resource_kms_crypto_key_test.go index 0b16cf2a738..336d2236435 100644 --- a/google/services/kms/resource_kms_crypto_key_test.go +++ b/google/services/kms/resource_kms_crypto_key_test.go @@ -446,6 +446,35 @@ func TestAccKmsCryptoKeyVersion_basic(t *testing.T) { }) } +func TestAccKmsCryptoKeyVersionWithSymmetricHSM(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", 
acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersionWithSymmetricHSM(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + }, + }) +} + func TestAccKmsCryptoKeyVersion_skipInitialVersion(t *testing.T) { t.Parallel() @@ -749,6 +778,44 @@ resource "google_kms_crypto_key_version" "crypto_key_version" { `, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) } +func testGoogleKmsCryptoKeyVersionWithSymmetricHSM(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + version_template { + algorithm = "GOOGLE_SYMMETRIC_ENCRYPTION" + protection_level = "HSM" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, 
projectBillingAccount, keyRingName, cryptoKeyName) +} + func testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { diff --git a/google/services/kms/resource_kms_crypto_key_version.go b/google/services/kms/resource_kms_crypto_key_version.go index 9cd8eaba3f4..34fa11a6f6f 100644 --- a/google/services/kms/resource_kms_crypto_key_version.go +++ b/google/services/kms/resource_kms_crypto_key_version.go @@ -83,19 +83,28 @@ Only provided for key versions with protectionLevel HSM.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cavium_certs": { - Type: schema.TypeString, + Type: schema.TypeList, Optional: true, Description: `Cavium certificate chain corresponding to the attestation.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, "google_card_certs": { - Type: schema.TypeString, + Type: schema.TypeList, Optional: true, Description: `Google card certificate chain corresponding to the attestation.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, "google_partition_certs": { - Type: schema.TypeString, + Type: schema.TypeList, Optional: true, Description: `Google partition certificate chain corresponding to the attestation.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, }, }, diff --git a/google/services/kms/resource_kms_ekm_connection.go b/google/services/kms/resource_kms_ekm_connection.go new file mode 100644 index 00000000000..68709d22265 --- /dev/null +++ b/google/services/kms/resource_kms_ekm_connection.go @@ -0,0 +1,766 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceKMSEkmConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceKMSEkmConnectionCreate, + Read: resourceKMSEkmConnectionRead, + Update: resourceKMSEkmConnectionUpdate, + Delete: resourceKMSEkmConnectionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceKMSEkmConnectionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the EkmConnection. 
+A full list of valid locations can be found by running 'gcloud kms locations list'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The resource name for the EkmConnection.`, + }, + "service_resolvers": { + Type: schema.TypeList, + Required: true, + Description: `A list of ServiceResolvers where the EKM can be reached. There should be one ServiceResolver per EKM replica. Currently, only a single ServiceResolver is supported`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Required: true, + Description: `Required. The hostname of the EKM replica used at TLS and HTTP layers.`, + }, + "server_certificates": { + Type: schema.TypeList, + Required: true, + Description: `Required. A list of leaf server certificates used to authenticate HTTPS connections to the EKM replica. Currently, a maximum of 10 Certificate is supported.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_der": { + Type: schema.TypeString, + Required: true, + Description: `Required. The raw certificate bytes in DER format. A base64-encoded string.`, + }, + "issuer": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The issuer distinguished name in RFC 2253 format. Only present if parsed is true.`, + }, + "not_after_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The certificate is not valid after this time. Only present if parsed is true. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "not_before_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The certificate is not valid before this time. Only present if parsed is true. 
+A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "parsed": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. True if the certificate was parsed successfully.`, + }, + "serial_number": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The certificate serial number as a hex string. Only present if parsed is true.`, + }, + "sha256_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The SHA-256 certificate fingerprint as a hex string. Only present if parsed is true.`, + }, + "subject": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The subject distinguished name in RFC 2253 format. Only present if parsed is true.`, + }, + "subject_alternative_dns_names": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Output only. The subject Alternative DNS names. Only present if parsed is true.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "service_directory_service": { + Type: schema.TypeString, + Required: true, + Description: `Required. The resource name of the Service Directory service pointing to an EKM replica, in the format projects/*/locations/*/namespaces/*/services/*`, + }, + "endpoint_filter": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Optional. The filter applied to the endpoints of the resolved service. If no filter is specified, all endpoints will be considered. An endpoint will be chosen arbitrarily from the filtered list for each request. 
For endpoint filter syntax and examples, see https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest.`, + }, + }, + }, + }, + "crypto_space_path": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Optional. Identifies the EKM Crypto Space that this EkmConnection maps to. Note: This field is required if KeyManagementMode is CLOUD_KMS.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Optional. Etag of the currently stored EkmConnection.`, + }, + "key_management_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MANUAL", "CLOUD_KMS", ""}), + Description: `Optional. Describes who can perform control plane operations on the EKM. If unset, this defaults to MANUAL Default value: "MANUAL" Possible values: ["MANUAL", "CLOUD_KMS"]`, + Default: "MANUAL", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time at which the EkmConnection was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceKMSEkmConnectionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandKMSEkmConnectionName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + serviceResolversProp, err := expandKMSEkmConnectionServiceResolvers(d.Get("service_resolvers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_resolvers"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceResolversProp)) && (ok || !reflect.DeepEqual(v, serviceResolversProp)) { + obj["serviceResolvers"] = serviceResolversProp + } + keyManagementModeProp, err := expandKMSEkmConnectionKeyManagementMode(d.Get("key_management_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_management_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyManagementModeProp)) && (ok || !reflect.DeepEqual(v, keyManagementModeProp)) { + obj["keyManagementMode"] = keyManagementModeProp + } + etagProp, err := expandKMSEkmConnectionEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + cryptoSpacePathProp, err := expandKMSEkmConnectionCryptoSpacePath(d.Get("crypto_space_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("crypto_space_path"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(cryptoSpacePathProp)) && (ok || !reflect.DeepEqual(v, cryptoSpacePathProp)) { + obj["cryptoSpacePath"] = cryptoSpacePathProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/ekmConnections?ekmConnectionId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EkmConnection: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EkmConnection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EkmConnection: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EkmConnection %q: %#v", d.Id(), res) + + return resourceKMSEkmConnectionRead(d, meta) +} + +func resourceKMSEkmConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for EkmConnection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSEkmConnection %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + + if err := d.Set("name", flattenKMSEkmConnectionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + if err := d.Set("service_resolvers", flattenKMSEkmConnectionServiceResolvers(res["serviceResolvers"], d, config)); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + if err := d.Set("key_management_mode", flattenKMSEkmConnectionKeyManagementMode(res["keyManagementMode"], d, config)); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + if err := d.Set("etag", flattenKMSEkmConnectionEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + if err := d.Set("crypto_space_path", flattenKMSEkmConnectionCryptoSpacePath(res["cryptoSpacePath"], d, config)); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + if err := d.Set("create_time", flattenKMSEkmConnectionCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading EkmConnection: %s", err) + } + + return nil +} + +func resourceKMSEkmConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + 
return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EkmConnection: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + serviceResolversProp, err := expandKMSEkmConnectionServiceResolvers(d.Get("service_resolvers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_resolvers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceResolversProp)) { + obj["serviceResolvers"] = serviceResolversProp + } + keyManagementModeProp, err := expandKMSEkmConnectionKeyManagementMode(d.Get("key_management_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_management_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, keyManagementModeProp)) { + obj["keyManagementMode"] = keyManagementModeProp + } + etagProp, err := expandKMSEkmConnectionEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + cryptoSpacePathProp, err := expandKMSEkmConnectionCryptoSpacePath(d.Get("crypto_space_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("crypto_space_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cryptoSpacePathProp)) { + obj["cryptoSpacePath"] = cryptoSpacePathProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating EkmConnection %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("service_resolvers") { + updateMask = append(updateMask, "serviceResolvers") + } + + if d.HasChange("key_management_mode") { 
+ updateMask = append(updateMask, "keyManagementMode") + } + + if d.HasChange("etag") { + updateMask = append(updateMask, "etag") + } + + if d.HasChange("crypto_space_path") { + updateMask = append(updateMask, "cryptoSpacePath") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating EkmConnection %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating EkmConnection %q: %#v", d.Id(), res) + } + + } + + return resourceKMSEkmConnectionRead(d, meta) +} + +func resourceKMSEkmConnectionDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] KMS EkmConnection resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceKMSEkmConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/ekmConnections/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenKMSEkmConnectionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenKMSEkmConnectionServiceResolvers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_directory_service": flattenKMSEkmConnectionServiceResolversServiceDirectoryService(original["serviceDirectoryService"], d, config), + "hostname": flattenKMSEkmConnectionServiceResolversHostname(original["hostname"], d, config), + "server_certificates": flattenKMSEkmConnectionServiceResolversServerCertificates(original["serverCertificates"], d, config), + "endpoint_filter": flattenKMSEkmConnectionServiceResolversEndpointFilter(original["endpointFilter"], d, config), + }) + 
} + return transformed +} +func flattenKMSEkmConnectionServiceResolversServiceDirectoryService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "raw_der": flattenKMSEkmConnectionServiceResolversServerCertificatesRawDer(original["rawDer"], d, config), + "parsed": flattenKMSEkmConnectionServiceResolversServerCertificatesParsed(original["parsed"], d, config), + "issuer": flattenKMSEkmConnectionServiceResolversServerCertificatesIssuer(original["issuer"], d, config), + "subject": flattenKMSEkmConnectionServiceResolversServerCertificatesSubject(original["subject"], d, config), + "not_before_time": flattenKMSEkmConnectionServiceResolversServerCertificatesNotBeforeTime(original["notBeforeTime"], d, config), + "not_after_time": flattenKMSEkmConnectionServiceResolversServerCertificatesNotAfterTime(original["notAfterTime"], d, config), + "sha256_fingerprint": flattenKMSEkmConnectionServiceResolversServerCertificatesSha256Fingerprint(original["sha256Fingerprint"], d, config), + "serial_number": flattenKMSEkmConnectionServiceResolversServerCertificatesSerialNumber(original["serialNumber"], d, config), + "subject_alternative_dns_names": flattenKMSEkmConnectionServiceResolversServerCertificatesSubjectAlternativeDnsNames(original["subjectAlternativeDnsNames"], d, config), + }) + } + return transformed +} 
+func flattenKMSEkmConnectionServiceResolversServerCertificatesRawDer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesParsed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesSubject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesNotBeforeTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesNotAfterTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesSha256Fingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesSerialNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversServerCertificatesSubjectAlternativeDnsNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionServiceResolversEndpointFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionKeyManagementMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenKMSEkmConnectionCryptoSpacePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSEkmConnectionCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandKMSEkmConnectionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolvers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceDirectoryService, err := expandKMSEkmConnectionServiceResolversServiceDirectoryService(original["service_directory_service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceDirectoryService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceDirectoryService"] = transformedServiceDirectoryService + } + + transformedHostname, err := expandKMSEkmConnectionServiceResolversHostname(original["hostname"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hostname"] = transformedHostname + } + + transformedServerCertificates, err := expandKMSEkmConnectionServiceResolversServerCertificates(original["server_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServerCertificates); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serverCertificates"] = transformedServerCertificates + } + + transformedEndpointFilter, err := 
expandKMSEkmConnectionServiceResolversEndpointFilter(original["endpoint_filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpointFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endpointFilter"] = transformedEndpointFilter + } + + req = append(req, transformed) + } + return req, nil +} + +func expandKMSEkmConnectionServiceResolversServiceDirectoryService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawDer, err := expandKMSEkmConnectionServiceResolversServerCertificatesRawDer(original["raw_der"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawDer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawDer"] = transformedRawDer + } + + transformedParsed, err := expandKMSEkmConnectionServiceResolversServerCertificatesParsed(original["parsed"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedParsed); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["parsed"] = transformedParsed + } + + transformedIssuer, err := expandKMSEkmConnectionServiceResolversServerCertificatesIssuer(original["issuer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuer); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["issuer"] = transformedIssuer + } + + transformedSubject, err := expandKMSEkmConnectionServiceResolversServerCertificatesSubject(original["subject"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subject"] = transformedSubject + } + + transformedNotBeforeTime, err := expandKMSEkmConnectionServiceResolversServerCertificatesNotBeforeTime(original["not_before_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNotBeforeTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["notBeforeTime"] = transformedNotBeforeTime + } + + transformedNotAfterTime, err := expandKMSEkmConnectionServiceResolversServerCertificatesNotAfterTime(original["not_after_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNotAfterTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["notAfterTime"] = transformedNotAfterTime + } + + transformedSha256Fingerprint, err := expandKMSEkmConnectionServiceResolversServerCertificatesSha256Fingerprint(original["sha256_fingerprint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256Fingerprint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Fingerprint"] = transformedSha256Fingerprint + } + + transformedSerialNumber, err := expandKMSEkmConnectionServiceResolversServerCertificatesSerialNumber(original["serial_number"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSerialNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serialNumber"] = transformedSerialNumber + } + + transformedSubjectAlternativeDnsNames, err := 
expandKMSEkmConnectionServiceResolversServerCertificatesSubjectAlternativeDnsNames(original["subject_alternative_dns_names"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubjectAlternativeDnsNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subjectAlternativeDnsNames"] = transformedSubjectAlternativeDnsNames + } + + req = append(req, transformed) + } + return req, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesRawDer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesParsed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesIssuer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesSubject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesNotBeforeTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesNotAfterTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesSha256Fingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesSerialNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversServerCertificatesSubjectAlternativeDnsNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionServiceResolversEndpointFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionKeyManagementMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSEkmConnectionCryptoSpacePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google/services/kms/resource_kms_ekm_connection_test.go b/google/services/kms/resource_kms_ekm_connection_test.go new file mode 100644 index 00000000000..844bdf9998e --- /dev/null +++ b/google/services/kms/resource_kms_ekm_connection_test.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package kms_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccKMSEkmConnection_kmsEkmConnectionBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKMSEkmConnection_kmsEkmConnectionBasicExample_full(context), + }, + { + ResourceName: "google_kms_ekm_connection.example-ekmconnection", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name"}, + }, + { + Config: testAccKMSEkmConnection_kmsEkmConnectionBasicExample_update(context), + }, + { + ResourceName: "google_kms_ekm_connection.example-ekmconnection", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name"}, + }, + }, + }) +} + +func testAccKMSEkmConnection_kmsEkmConnectionBasicExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_secret_manager_secret_version" "raw_der" { + secret = "playground-cert" + project = "315636579862" +} +data "google_secret_manager_secret_version" "hostname" { + secret = "external-uri" + project = "315636579862" +} +data "google_secret_manager_secret_version" "servicedirectoryservice" { + secret = "external-servicedirectoryservice" + project = "315636579862" +} +data "google_project" "vpc-project" { + project_id = "cloud-ekm-refekm-playground" +} +data "google_project" "project" { +} +resource "google_project_iam_member" "add_sdviewer" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.viewer" + member = 
"serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} +resource "google_project_iam_member" "add_pscAuthorizedService" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.pscAuthorizedService" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} +resource "google_kms_ekm_connection" "example-ekmconnection" { + name = "tf_test_ekmconnection_example%{random_suffix}" + location = "us-central1" + key_management_mode = "MANUAL" + service_resolvers { + service_directory_service = data.google_secret_manager_secret_version.servicedirectoryservice.secret_data + hostname = data.google_secret_manager_secret_version.hostname.secret_data + server_certificates { + raw_der = data.google_secret_manager_secret_version.raw_der.secret_data + } + } + depends_on = [ + google_project_iam_member.add_pscAuthorizedService, + google_project_iam_member.add_sdviewer + ] +} +`, context) +} + +func testAccKMSEkmConnection_kmsEkmConnectionBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "vpc-project" { + project_id = "cloud-ekm-refekm-playground" +} +data "google_project" "project" { +} +data "google_secret_manager_secret_version" "raw_der" { + secret = "playground-cert" + project = "315636579862" +} +data "google_secret_manager_secret_version" "hostname" { + secret = "external-uri" + project = "315636579862" +} +data "google_secret_manager_secret_version" "servicedirectoryservice" { + secret = "external-servicedirectoryservice" + project = "315636579862" +} +resource "google_project_iam_member" "add_sdviewer_updateekmconnection" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.viewer" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} +resource "google_project_iam_member" 
"add_pscAuthorizedService_updateekmconnection" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.pscAuthorizedService" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} +resource "google_kms_ekm_connection" "example-ekmconnection" { + name = "tf_test_ekmconnection_example%{random_suffix}" + location = "us-central1" + key_management_mode = "CLOUD_KMS" + crypto_space_path = "v0/longlived/crypto-space-placeholder" + service_resolvers { + service_directory_service = data.google_secret_manager_secret_version.servicedirectoryservice.secret_data + hostname = data.google_secret_manager_secret_version.hostname.secret_data + server_certificates { + raw_der = data.google_secret_manager_secret_version.raw_der.secret_data + } + } + depends_on = [ + google_project_iam_member.add_pscAuthorizedService_updateekmconnection, + google_project_iam_member.add_sdviewer_updateekmconnection + ] +} +`, context) +} diff --git a/google/services/notebooks/resource_notebooks_runtime_test.go b/google/services/notebooks/resource_notebooks_runtime_test.go index 4f1d6a3450b..619c1ef7b83 100644 --- a/google/services/notebooks/resource_notebooks_runtime_test.go +++ b/google/services/notebooks/resource_notebooks_runtime_test.go @@ -23,25 +23,28 @@ func TestAccNotebooksRuntime_update(t *testing.T) { Config: testAccNotebooksRuntime_basic(context), }, { - ResourceName: "google_notebooks_runtime.runtime", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, }, { Config: testAccNotebooksRuntime_update(context), }, { - ResourceName: "google_notebooks_runtime.runtime", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"labels", "terraform_labels"}, }, { Config: testAccNotebooksRuntime_basic(context), }, { - ResourceName: "google_notebooks_runtime.runtime", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, }, }, }) @@ -101,6 +104,9 @@ resource "google_notebooks_runtime" "runtime" { reserved_ip_range = "192.168.255.0/24" } } + labels = { + k = "val" + } } `, context) } diff --git a/google/services/resourcemanager/resource_google_project.go b/google/services/resourcemanager/resource_google_project.go index dd4ef711d57..f700ecd72c8 100644 --- a/google/services/resourcemanager/resource_google_project.go +++ b/google/services/resourcemanager/resource_google_project.go @@ -229,7 +229,13 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error return errwrap.Wrapf("Error enabling the Compute Engine API required to delete the default network: {{err}} ", err) } - if err = forceDeleteComputeNetwork(d, config, project.ProjectId, "default"); err != nil { + err = forceDeleteComputeNetwork(d, config, project.ProjectId, "default") + // Retry if API is not yet enabled. 
+ if err != nil && transport_tpg.IsGoogleApiErrorWithCode(err, 403) { + time.Sleep(10 * time.Second) + err = forceDeleteComputeNetwork(d, config, project.ProjectId, "default") + } + if err != nil { if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { log.Printf("[DEBUG] Default network not found for project %q, no need to delete it", project.ProjectId) } else { diff --git a/google/services/securityposture/resource_securityposture_posture_deployment_generated_test.go b/google/services/securityposture/resource_securityposture_posture_deployment_generated_test.go index 82282a9b597..e1e7b0471a2 100644 --- a/google/services/securityposture/resource_securityposture_posture_deployment_generated_test.go +++ b/google/services/securityposture/resource_securityposture_posture_deployment_generated_test.go @@ -60,7 +60,7 @@ func TestAccSecurityposturePostureDeployment_securityposturePostureDeploymentBas func testAccSecurityposturePostureDeployment_securityposturePostureDeploymentBasicExample(context map[string]interface{}) string { return acctest.Nprintf(` -resource "google_securityposture_posture" "posture1" { +resource "google_securityposture_posture" "posture_1" { posture_id = "posture_1" parent = "organizations/%{org_id}" location = "global" @@ -89,8 +89,8 @@ resource "google_securityposture_posture_deployment" "postureDeployment" { location = "global" description = "a new posture deployment" target_resource = "projects/%{project_number}" - posture_id = google_securityposture_posture.posture1.name - posture_revision_id = google_securityposture_posture.posture1.revision_id + posture_id = google_securityposture_posture.posture_1.name + posture_revision_id = google_securityposture_posture.posture_1.revision_id } `, context) } diff --git a/google/services/securityposture/resource_securityposture_posture_generated_test.go b/google/services/securityposture/resource_securityposture_posture_generated_test.go index 3da5a694445..53d9523727b 100644 --- 
a/google/services/securityposture/resource_securityposture_posture_generated_test.go +++ b/google/services/securityposture/resource_securityposture_posture_generated_test.go @@ -60,7 +60,7 @@ func TestAccSecurityposturePosture_securityposturePostureBasicExample(t *testing func testAccSecurityposturePosture_securityposturePostureBasicExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_securityposture_posture" "posture1"{ - posture_id = "posture_1" + posture_id = "posture_example" parent = "organizations/%{org_id}" location = "global" state = "ACTIVE" diff --git a/google/services/storage/resource_storage_bucket.go b/google/services/storage/resource_storage_bucket.go index 4a7c7bce2b3..16f0611343d 100644 --- a/google/services/storage/resource_storage_bucket.go +++ b/google/services/storage/resource_storage_bucket.go @@ -1238,10 +1238,10 @@ func flattenBucketLifecycle(d *schema.ResourceData, lifecycle *storage.BucketLif rules := make([]map[string]interface{}, 0, len(lifecycle.Rule)) - for _, rule := range lifecycle.Rule { + for index, rule := range lifecycle.Rule { rules = append(rules, map[string]interface{}{ "action": schema.NewSet(resourceGCSBucketLifecycleRuleActionHash, []interface{}{flattenBucketLifecycleRuleAction(rule.Action)}), - "condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(d, rule.Condition)}), + "condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(index, d, rule.Condition)}), }) } @@ -1255,7 +1255,7 @@ func flattenBucketLifecycleRuleAction(action *storage.BucketLifecycleRuleAction) } } -func flattenBucketLifecycleRuleCondition(d *schema.ResourceData, condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { +func flattenBucketLifecycleRuleCondition(index int, d *schema.ResourceData, condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { 
ruleCondition := map[string]interface{}{ "created_before": condition.CreatedBefore, "matches_storage_class": tpgresource.ConvertStringArrToInterface(condition.MatchesStorageClass), @@ -1280,7 +1280,7 @@ func flattenBucketLifecycleRuleCondition(d *schema.ResourceData, condition *stor } } // setting no_age value from state config since it is terraform only variable and not getting value from backend. - if v, ok := d.GetOk("lifecycle_rule.0.condition"); ok { + if v, ok := d.GetOk(fmt.Sprintf("lifecycle_rule.%d.condition", index)); ok { state_condition := v.(*schema.Set).List()[0].(map[string]interface{}) ruleCondition["no_age"] = state_condition["no_age"].(bool) } diff --git a/google/services/storage/resource_storage_bucket_test.go b/google/services/storage/resource_storage_bucket_test.go index 0619f624962..c04894751d1 100644 --- a/google/services/storage/resource_storage_bucket_test.go +++ b/google/services/storage/resource_storage_bucket_test.go @@ -509,7 +509,7 @@ func TestAccStorageBucket_lifecycleRulesNoAge(t *testing.T) { ResourceName: "google_storage_bucket.bucket", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.0.condition.0.no_age"}, + ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.no_age"}, }, { Config: testAccStorageBucket_customAttributes_withLifecycleNoAgeAndAge(bucketName), @@ -523,7 +523,7 @@ func TestAccStorageBucket_lifecycleRulesNoAge(t *testing.T) { ResourceName: "google_storage_bucket.bucket", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.0.condition.0.no_age"}, + ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.no_age"}, }, { Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), @@ -1478,8 +1478,8 @@ func testAccCheckStorageBucketLifecycleConditionState(expected *bool, b *storage func 
testAccCheckStorageBucketLifecycleConditionNoAge(expected *int64, b *storage.Bucket) resource.TestCheckFunc { return func(s *terraform.State) error { - actual := b.Lifecycle.Rule[0].Condition.Age - if expected == nil && b.Lifecycle.Rule[0].Condition.Age == nil { + actual := b.Lifecycle.Rule[1].Condition.Age + if expected == nil && b.Lifecycle.Rule[1].Condition.Age == nil { return nil } if expected == nil { @@ -1689,6 +1689,15 @@ resource "google_storage_bucket" "bucket" { name = "%s" location = "EU" force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + no_age = false + } + } lifecycle_rule { action { type = "Delete" @@ -1708,6 +1717,15 @@ resource "google_storage_bucket" "bucket" { name = "%s" location = "EU" force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + no_age = false + } + } lifecycle_rule { action { type = "Delete" diff --git a/google/sweeper/gcp_sweeper_test.go b/google/sweeper/gcp_sweeper_test.go index 0b40be0f0f2..a711d551968 100644 --- a/google/sweeper/gcp_sweeper_test.go +++ b/google/sweeper/gcp_sweeper_test.go @@ -13,6 +13,7 @@ import ( _ "github.com/hashicorp/terraform-provider-google/google/services/alloydb" _ "github.com/hashicorp/terraform-provider-google/google/services/apigee" _ "github.com/hashicorp/terraform-provider-google/google/services/appengine" + _ "github.com/hashicorp/terraform-provider-google/google/services/apphub" _ "github.com/hashicorp/terraform-provider-google/google/services/artifactregistry" _ "github.com/hashicorp/terraform-provider-google/google/services/beyondcorp" _ "github.com/hashicorp/terraform-provider-google/google/services/biglake" diff --git a/google/transport/config.go b/google/transport/config.go index 53079ebaa22..6e8732cbdeb 100644 --- a/google/transport/config.go +++ b/google/transport/config.go @@ -193,6 +193,7 @@ type Config struct { AlloydbBasePath string ApigeeBasePath string AppEngineBasePath string + 
ApphubBasePath string ArtifactRegistryBasePath string BeyondcorpBasePath string BiglakeBasePath string @@ -326,6 +327,7 @@ const ActiveDirectoryBasePathKey = "ActiveDirectory" const AlloydbBasePathKey = "Alloydb" const ApigeeBasePathKey = "Apigee" const AppEngineBasePathKey = "AppEngine" +const ApphubBasePathKey = "Apphub" const ArtifactRegistryBasePathKey = "ArtifactRegistry" const BeyondcorpBasePathKey = "Beyondcorp" const BiglakeBasePathKey = "Biglake" @@ -453,6 +455,7 @@ var DefaultBasePaths = map[string]string{ AlloydbBasePathKey: "https://alloydb.googleapis.com/v1/", ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", AppEngineBasePathKey: "https://appengine.googleapis.com/v1/", + ApphubBasePathKey: "https://apphub.googleapis.com/v1/", ArtifactRegistryBasePathKey: "https://artifactregistry.googleapis.com/v1/", BeyondcorpBasePathKey: "https://beyondcorp.googleapis.com/v1/", BiglakeBasePathKey: "https://biglake.googleapis.com/v1/", @@ -675,6 +678,11 @@ func SetEndpointDefaults(d *schema.ResourceData) error { "GOOGLE_APP_ENGINE_CUSTOM_ENDPOINT", }, DefaultBasePaths[AppEngineBasePathKey])) } + if d.Get("apphub_custom_endpoint") == "" { + d.Set("apphub_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_APPHUB_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ApphubBasePathKey])) + } if d.Get("artifact_registry_custom_endpoint") == "" { d.Set("artifact_registry_custom_endpoint", MultiEnvDefault([]string{ "GOOGLE_ARTIFACT_REGISTRY_CUSTOM_ENDPOINT", @@ -2071,6 +2079,7 @@ func ConfigureBasePaths(c *Config) { c.AlloydbBasePath = DefaultBasePaths[AlloydbBasePathKey] c.ApigeeBasePath = DefaultBasePaths[ApigeeBasePathKey] c.AppEngineBasePath = DefaultBasePaths[AppEngineBasePathKey] + c.ApphubBasePath = DefaultBasePaths[ApphubBasePathKey] c.ArtifactRegistryBasePath = DefaultBasePaths[ArtifactRegistryBasePathKey] c.BeyondcorpBasePath = DefaultBasePaths[BeyondcorpBasePathKey] c.BiglakeBasePath = DefaultBasePaths[BiglakeBasePathKey] diff --git 
a/website/docs/d/clouddeploy_custom_target_type_iam_policy.html.markdown b/website/docs/d/clouddeploy_custom_target_type_iam_policy.html.markdown new file mode 100644 index 00000000000..c87e6ae9f2a --- /dev/null +++ b/website/docs/d/clouddeploy_custom_target_type_iam_policy.html.markdown @@ -0,0 +1,53 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud Deploy" +description: |- + A datasource to retrieve the IAM policy state for Cloud Deploy CustomTargetType +--- + + +# `google_clouddeploy_custom_target_type_iam_policy` +Retrieves the current IAM policy data for customtargettype + + + +## example + +```hcl +data "google_clouddeploy_custom_target_type_iam_policy" "policy" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Used to find the parent resource to bind the IAM policy to +* `location` - (Required) The location of the source. Used to find the parent resource to bind the IAM policy to + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. 
+ +## Attributes Reference + +The attributes are exported: + +* `etag` - (Computed) The etag of the IAM policy. + +* `policy_data` - (Required only by `google_clouddeploy_custom_target_type_iam_policy`) The policy data generated by + a `google_iam_policy` data source. diff --git a/website/docs/d/clouddeploy_target_iam_policy.html.markdown b/website/docs/d/clouddeploy_target_iam_policy.html.markdown new file mode 100644 index 00000000000..6ec5230db49 --- /dev/null +++ b/website/docs/d/clouddeploy_target_iam_policy.html.markdown @@ -0,0 +1,52 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud Deploy" +description: |- + A datasource to retrieve the IAM policy state for Cloud Deploy Target +--- + + +# `google_clouddeploy_target_iam_policy` +Retrieves the current IAM policy data for target + + + +## example + +```hcl +data "google_clouddeploy_target_iam_policy" "policy" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Used to find the parent resource to bind the IAM policy to + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. 
+ +## Attributes Reference + +The attributes are exported: + +* `etag` - (Computed) The etag of the IAM policy. + +* `policy_data` - (Required only by `google_clouddeploy_target_iam_policy`) The policy data generated by + a `google_iam_policy` data source. diff --git a/website/docs/d/monitoring_app_engine_service.html.markdown b/website/docs/d/monitoring_app_engine_service.html.markdown index 03525dcd610..188d6e46e37 100644 --- a/website/docs/d/monitoring_app_engine_service.html.markdown +++ b/website/docs/d/monitoring_app_engine_service.html.markdown @@ -34,7 +34,7 @@ data "google_monitoring_app_engine_service" "srv" { resource "google_app_engine_standard_app_version" "myapp" { version_id = "v1" service = "myapp" - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" diff --git a/website/docs/guides/getting_started.html.markdown b/website/docs/guides/getting_started.html.markdown index a329a71258e..8ecb74a8521 100644 --- a/website/docs/guides/getting_started.html.markdown +++ b/website/docs/guides/getting_started.html.markdown @@ -1,10 +1,10 @@ --- -page_title: "Getting Started with the Google provider" +page_title: "Getting Started with the Google Cloud provider" description: |- - Getting started with the Google Cloud Platform provider + Getting started with the Google Cloud provider --- -# Getting Started with the Google Provider +# Getting Started with the Google Cloud provider ## Before you begin diff --git a/website/docs/guides/provider_reference.html.markdown b/website/docs/guides/provider_reference.html.markdown index 56642c7a867..8b03ef43503 100644 --- a/website/docs/guides/provider_reference.html.markdown +++ b/website/docs/guides/provider_reference.html.markdown @@ -1,7 +1,7 @@ --- -page_title: "Google Provider Configuration Reference" +page_title: "Google Cloud Provider Configuration Reference" description: |- - Configuration reference for the Google provider for Terraform. 
+ Configuration reference for the Terraform provider for Google Cloud. --- # Google Provider Configuration Reference diff --git a/website/docs/guides/version_2_upgrade.html.markdown b/website/docs/guides/version_2_upgrade.html.markdown index e3eb664de0e..f7a3fafb7a6 100644 --- a/website/docs/guides/version_2_upgrade.html.markdown +++ b/website/docs/guides/version_2_upgrade.html.markdown @@ -1,12 +1,12 @@ --- -page_title: "Terraform Google Provider 2.0.0 Upgrade Guide" +page_title: "Terraform provider for Google Cloud 2.0.0 Upgrade Guide" description: |- - Terraform Google Provider 2.0.0 Upgrade Guide + Terraform provider for Google Cloud 2.0.0 Upgrade Guide --- -# Terraform Google Provider 2.0.0 Upgrade Guide +# Terraform provider for Google Cloud 2.0.0 Upgrade Guide -Version `2.0.0` of the Google provider for Terraform is a major release and +Version `2.0.0` of the Terraform provider for Google Cloud is a major release and includes some changes that you will need to consider when upgrading. This guide is intended to help with that process and focuses only on the changes necessary to upgrade from version `1.20.0` to `2.0.0`. 
diff --git a/website/docs/guides/version_3_upgrade.html.markdown b/website/docs/guides/version_3_upgrade.html.markdown index 06e5e64b36b..305989cfae7 100644 --- a/website/docs/guides/version_3_upgrade.html.markdown +++ b/website/docs/guides/version_3_upgrade.html.markdown @@ -1,12 +1,12 @@ --- -page_title: "Terraform Google Provider 3.0.0 Upgrade Guide" +page_title: "Terraform provider for Google Cloud 3.0.0 Upgrade Guide" description: |- - Terraform Google Provider 3.0.0 Upgrade Guide + Terraform provider for Google Cloud 3.0.0 Upgrade Guide --- -# Terraform Google Provider 3.0.0 Upgrade Guide +# Terraform provider for Google Cloud 3.0.0 Upgrade Guide -The `3.0.0` release of the Google provider for Terraform is a major version and +The `3.0.0` release of the Terraform provider for Google Cloud is a major version and includes some changes that you will need to consider when upgrading. This guide is intended to help with that process and focuses only on the changes necessary to upgrade from the final `2.X` series release to `3.0.0`. diff --git a/website/docs/guides/version_4_upgrade.html.markdown b/website/docs/guides/version_4_upgrade.html.markdown index bad7cca8783..b34c133d106 100644 --- a/website/docs/guides/version_4_upgrade.html.markdown +++ b/website/docs/guides/version_4_upgrade.html.markdown @@ -1,12 +1,12 @@ --- -page_title: "Terraform Google Provider 4.0.0 Upgrade Guide" +page_title: "Terraform provider for Google Cloud 4.0.0 Upgrade Guide" description: |- - Terraform Google Provider 4.0.0 Upgrade Guide + Terraform provider for Google Cloud 4.0.0 Upgrade Guide --- -# Terraform Google Provider 4.0.0 Upgrade Guide +# Terraform provider for Google Cloud 4.0.0 Upgrade Guide -The `4.0.0` release of the Google provider for Terraform is a major version and +The `4.0.0` release of the Terraform provider for Google Cloud is a major version and includes some changes that you will need to consider when upgrading. 
This guide is intended to help with that process and focuses only on the changes necessary to upgrade from the final `3.X` series release to `4.0.0`. diff --git a/website/docs/guides/version_5_upgrade.html.markdown b/website/docs/guides/version_5_upgrade.html.markdown index 1bd49e4d52c..078ef0f0323 100644 --- a/website/docs/guides/version_5_upgrade.html.markdown +++ b/website/docs/guides/version_5_upgrade.html.markdown @@ -1,12 +1,12 @@ --- -page_title: "Terraform Google Provider 5.0.0 Upgrade Guide" +page_title: "Terraform provider for Google Cloud 5.0.0 Upgrade Guide" description: |- - Terraform Google Provider 5.0.0 Upgrade Guide + Terraform provider for Google Cloud 5.0.0 Upgrade Guide --- -# Terraform Google Provider 5.0.0 Upgrade Guide +# Terraform provider for Google Cloud 5.0.0 Upgrade Guide -The `5.0.0` release of the Google provider for Terraform is a major version and +The `5.0.0` release of the Terraform provider for Google Cloud is a major version and includes some changes that you will need to consider when upgrading. This guide is intended to help with that process and focuses only on the changes necessary to upgrade from the final `4.X` series release to `5.0.0`. @@ -113,8 +113,8 @@ included in requests to the API. Replacing those labels' values with `_` or `true` are recommended. Not all of Google Cloud resources support labels and annotations. Please check -the Terraform Google provider resource documentation to figure out if a given -resource supports `labels` or `annotations` fields. +the resource documentation to figure out if a given resource supports `labels` +or `annotations` fields. #### Provider default labels @@ -188,7 +188,7 @@ Provider-level default annotations are not supported at this time. 
#### Resource labels -Previously, `labels` and `annotations` fields in the Terraform Google provider +Previously, `labels` and `annotations` fields in the Google Cloud provider were authoritative and Terraform thought it was the only owner of the fields. This model worked well initially, but with the introduction of system labels and other client-managed labels, Terraform would conflict with their labels and show diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index db9c0e41fba..fee57ac9c42 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -1,12 +1,12 @@ --- -page_title: "Provider: Google Cloud Platform" +page_title: "Provider: Google Cloud" description: |- - The Google provider is used to configure your Google Cloud Platform infrastructure + The Terraform provider for Google Cloud is used to configure your Google Cloud infrastructure --- -# Google Cloud Platform Provider +# Terraform provider for Google Cloud -The Google provider is used to configure your [Google Cloud Platform](https://cloud.google.com/) infrastructure. +The Google Cloud provider is used to configure your [Google Cloud](https://cloud.google.com/) infrastructure. To learn the basics of Terraform using this provider, follow the hands-on [get started tutorials](https://developer.hashicorp.com/terraform/tutorials/gcp-get-started/infrastructure-as-code). @@ -14,7 +14,7 @@ For more involved examples, try [provisioning a GKE cluster](https://learn.hashi and deploying [Consul-backed Vault into it using Terraform Cloud](https://learn.hashicorp.com/tutorials/terraform/kubernetes-consul-vault-pipeline). Already experienced with Terraform? Check out the [Getting Started](/docs/providers/google/guides/getting_started.html) -page for a short introduction to using Terraform with Google Cloud Platform. +page for a short introduction to using Terraform with Google Cloud. 
## Example Usage @@ -31,9 +31,9 @@ See the [provider reference](/docs/providers/google/guides/provider_reference.ht page for details on authentication and configuring the provider. Take advantage of [Modules](https://www.terraform.io/docs/modules/index.html) -to simplify your config by browsing the [Module Registry for GCP modules](https://registry.terraform.io/browse?provider=google). +to simplify your config by browsing the [Module Registry for Google Cloud modules](https://registry.terraform.io/browse?provider=google). -The Google provider is jointly maintained by: +The Google Cloud provider is jointly maintained by: * The [Terraform Team](https://cloud.google.com/docs/terraform) at Google * The Terraform team at [HashiCorp](https://www.hashicorp.com/) @@ -41,7 +41,7 @@ The Google provider is jointly maintained by: If you have configuration questions, or general questions about using the provider, try checking out: * [The Google category on discuss.hashicorp.com](https://discuss.hashicorp.com/c/terraform-providers/tf-google/32) -* The [Google Cloud Platform Community Slack](https://googlecloud-community.slack.com/) `#terraform` channel. If you are not registered with that Slack Workspace yet, the up-to-date **public sign-up link** can be found in the "Stay Connected" section of the [Google Developer Center](https://cloud.google.com/developers#stay-connected). +* The [Google Cloud Community Slack](https://googlecloud-community.slack.com/) `#terraform` channel. If you are not registered with that Slack Workspace yet, the up-to-date **public sign-up link** can be found in the "Stay Connected" section of the [Google Developer Center](https://cloud.google.com/developers#stay-connected). 
* [Terraform's community resources](https://www.terraform.io/docs/extend/community/index.html) * [HashiCorp support](https://support.hashicorp.com) for Terraform Enterprise customers @@ -53,22 +53,22 @@ and the [`google-beta` provider Releases](https://github.com/hashicorp/terraform for release notes and additional information. Per [Terraform Provider Versioning](https://www.hashicorp.com/blog/hashicorp-terraform-provider-versioning), -the Google provider follows [semantic versioning](https://semver.org/). +the Google Cloud provider follows [semantic versioning](https://semver.org/). In practice, patch / bugfix-only releases of the provider are infrequent. Most provider releases are either minor or major releases. ### Minor Releases -The Google provider currently aims to publish a minor release every week, +The Google Cloud provider currently aims to publish a minor release every week, although the timing of individual releases may differ if required by the provider team. ### Major Releases -The Google provider publishes major releases roughly yearly. An upgrade guide -will be published to help ease you through the transition between the prior -releases series and the new major release. +The Google Cloud provider publishes major releases roughly yearly. An upgrade +guide will be published to help ease you through the transition between the +prior releases series and the new major release. During major releases, all current deprecation warnings will be resolved, removing the field in question unless the deprecation warning message specifies @@ -80,7 +80,7 @@ lifecycle to give users plenty of time to safely update their configs. ## Features and Bug Requests -The Google provider's bugs and feature requests can be found in the [GitHub repo issues](https://github.com/hashicorp/terraform-provider-google/issues). +The Google Cloud provider's bugs and feature requests can be found in the [GitHub repo issues](https://github.com/hashicorp/terraform-provider-google/issues). 
Please avoid "me too" or "+1" comments. Instead, use a thumbs up [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) on enhancement requests. Provider maintainers will often prioritize work based on the number of thumbs on an issue. @@ -110,7 +110,7 @@ page for details on configuring the provider. ## Contributing -If you'd like to help extend the Google provider, we gladly accept community +If you'd like to help extend the Google Cloud provider, we gladly accept community contributions! Development on the providers is done through the [Magic Modules](https://github.com/GoogleCloudPlatform/magic-modules) repository. Our full contribution guide is available on the diff --git a/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown b/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown index 82675247cfe..31d42e51bdd 100644 --- a/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown +++ b/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown @@ -52,7 +52,7 @@ resource "google_app_engine_application_url_dispatch_rules" "web_service" { resource "google_app_engine_standard_app_version" "admin_v3" { version_id = "v3" service = "admin" - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" diff --git a/website/docs/r/app_engine_service_network_settings.html.markdown b/website/docs/r/app_engine_service_network_settings.html.markdown index 3471c7fb5e2..1370576c9ae 100644 --- a/website/docs/r/app_engine_service_network_settings.html.markdown +++ b/website/docs/r/app_engine_service_network_settings.html.markdown @@ -51,7 +51,7 @@ resource "google_app_engine_standard_app_version" "internalapp" { service = "internalapp" delete_service_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } diff --git a/website/docs/r/app_engine_service_split_traffic.html.markdown 
b/website/docs/r/app_engine_service_split_traffic.html.markdown index dbaeef2e202..ef6fbbc503a 100644 --- a/website/docs/r/app_engine_service_split_traffic.html.markdown +++ b/website/docs/r/app_engine_service_split_traffic.html.markdown @@ -46,7 +46,7 @@ resource "google_app_engine_standard_app_version" "liveapp_v1" { service = "liveapp" delete_service_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } @@ -65,7 +65,7 @@ resource "google_app_engine_standard_app_version" "liveapp_v2" { service = "liveapp" noop_on_destroy = true - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" } diff --git a/website/docs/r/app_engine_standard_app_version.html.markdown b/website/docs/r/app_engine_standard_app_version.html.markdown index d131d713b9c..cbd6c630071 100644 --- a/website/docs/r/app_engine_standard_app_version.html.markdown +++ b/website/docs/r/app_engine_standard_app_version.html.markdown @@ -55,7 +55,7 @@ resource "google_project_iam_member" "storage_viewer" { resource "google_app_engine_standard_app_version" "myapp_v1" { version_id = "v1" service = "myapp" - runtime = "nodejs10" + runtime = "nodejs20" entrypoint { shell = "node ./app.js" @@ -92,7 +92,7 @@ resource "google_app_engine_standard_app_version" "myapp_v1" { resource "google_app_engine_standard_app_version" "myapp_v2" { version_id = "v2" service = "myapp" - runtime = "nodejs10" + runtime = "nodejs20" app_engine_apis = true entrypoint { diff --git a/website/docs/r/apphub_application.html.markdown b/website/docs/r/apphub_application.html.markdown new file mode 100644 index 00000000000..48ddde4291b --- /dev/null +++ b/website/docs/r/apphub_application.html.markdown @@ -0,0 +1,269 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically 
generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "App Hub" +description: |- + Application is a functional grouping of Services and Workloads that helps achieve a desired end-to-end business functionality. +--- + +# google\_apphub\_application + +Application is a functional grouping of Services and Workloads that helps achieve a desired end-to-end business functionality. Services and Workloads are owned by the Application. + + + + +## Example Usage - Application Basic + + +```hcl +resource "google_apphub_application" "example" { + location = "us-east1" + application_id = "example-application" + scope { + type = "REGIONAL" + } +} +``` + +## Example Usage - Application Full + + +```hcl +resource "google_apphub_application" "example2" { + location = "us-east1" + application_id = "example-application" + display_name = "Application Full" + scope { + type = "REGIONAL" + } + description = "Application for testing" + attributes { + environment { + type = "STAGING" + } + criticality { + type = "MISSION_CRITICAL" + } + business_owners { + display_name = "Alice" + email = "alice@google.com" + } + developer_owners { + display_name = "Bob" + email = "bob@google.com" + } + operator_owners { + display_name = "Charlie" + email = "charlie@google.com" + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `scope` - + (Required) + Scope of an application. + Structure is [documented below](#nested_scope). + +* `location` - + (Required) + Part of `parent`. See documentation of `projectsId`. + +* `application_id` - + (Required) + Required. The Application identifier. + + +The `scope` block supports: + +* `type` - + (Required) + Required. Scope Type. + Possible values: + REGIONAL + Possible values are: `REGIONAL`. 
+ +- - - + + +* `display_name` - + (Optional) + Optional. User-defined name for the Application. + +* `description` - + (Optional) + Optional. User-defined description of an Application. + +* `attributes` - + (Optional) + Consumer provided attributes. + Structure is [documented below](#nested_attributes). + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +The `attributes` block supports: + +* `criticality` - + (Optional) + Criticality of the Application, Service, or Workload + Structure is [documented below](#nested_criticality). + +* `environment` - + (Optional) + Environment of the Application, Service, or Workload + Structure is [documented below](#nested_environment). + +* `developer_owners` - + (Optional) + Optional. Developer team that owns development and coding. + Structure is [documented below](#nested_developer_owners). + +* `operator_owners` - + (Optional) + Optional. Operator team that ensures runtime and operations. + Structure is [documented below](#nested_operator_owners). + +* `business_owners` - + (Optional) + Optional. Business team that ensures user needs are met and value is delivered + Structure is [documented below](#nested_business_owners). + + +The `criticality` block supports: + +* `type` - + (Required) + Criticality type. + Possible values are: `MISSION_CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. + +The `environment` block supports: + +* `type` - + (Required) + Environment type. + Possible values are: `PRODUCTION`, `STAGING`, `TEST`, `DEVELOPMENT`. + +The `developer_owners` block supports: + +* `display_name` - + (Optional) + Optional. Contact's name. + +* `email` - + (Required) + Required. Email address of the contacts. + +The `operator_owners` block supports: + +* `display_name` - + (Optional) + Optional. Contact's name. + +* `email` - + (Required) + Required. Email address of the contacts. 
+ +The `business_owners` block supports: + +* `display_name` - + (Optional) + Optional. Contact's name. + +* `email` - + (Required) + Required. Email address of the contacts. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/applications/{{application_id}}` + +* `name` - + Identifier. The resource name of an Application. Format: + "projects/{host-project-id}/locations/{location}/applications/{application-id}" + +* `create_time` - + Output only. Create time. + +* `update_time` - + Output only. Update time. + +* `uid` - + Output only. A universally unique identifier (in UUID4 format) for the `Application`. + +* `state` - + Output only. Application state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +Application can be imported using any of these accepted formats: + +* `projects/{{project}}/locations/{{location}}/applications/{{application_id}}` +* `{{project}}/{{location}}/{{application_id}}` +* `{{location}}/{{application_id}}` + + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Application using one of the formats above. For example: + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/applications/{{application_id}}" + to = google_apphub_application.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Application can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_apphub_application.default projects/{{project}}/locations/{{location}}/applications/{{application_id}} +$ terraform import google_apphub_application.default {{project}}/{{location}}/{{application_id}} +$ terraform import google_apphub_application.default {{location}}/{{application_id}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/bigquery_routine.html.markdown b/website/docs/r/bigquery_routine.html.markdown index 98e99b1303a..1bd75bc74bd 100644 --- a/website/docs/r/bigquery_routine.html.markdown +++ b/website/docs/r/bigquery_routine.html.markdown @@ -29,11 +29,11 @@ To get more information about Routine, see: * [Routines Intro](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines) -## Example Usage - Big Query Routine Basic +## Example Usage - Bigquery Routine Basic ```hcl @@ -50,11 +50,11 @@ resource "google_bigquery_routine" "sproc" { } ``` -## Example Usage - Big Query Routine Json +## Example Usage - Bigquery Routine Json ```hcl @@ -81,11 +81,11 @@ resource "google_bigquery_routine" "sproc" { } ``` -## Example Usage - Big Query Routine Tvf +## Example Usage - Bigquery Routine Tvf ```hcl @@ -112,11 +112,11 @@ resource "google_bigquery_routine" "sproc" { } ``` -## Example Usage - Big Query Routine Pyspark +## Example Usage - Bigquery Routine Pyspark ```hcl @@ -163,11 +163,11 @@ resource "google_bigquery_routine" "pyspark" { } ``` -## Example Usage - Big Query Routine Pyspark Mainfile +## Example Usage - Bigquery Routine Pyspark Mainfile ```hcl @@ -198,11 +198,11 @@ resource "google_bigquery_routine" "pyspark_mainfile" { } ``` -## Example Usage - Big Query Routine Spark Jar +## Example Usage - Bigquery Routine Spark Jar ```hcl @@ -235,6 +235,38 @@ resource "google_bigquery_routine" "spark_jar" { } } ``` +## Example Usage 
- Bigquery Routine Remote Function + + +```hcl +resource "google_bigquery_dataset" "test" { + dataset_id = "dataset_id" +} + +resource "google_bigquery_connection" "test" { + connection_id = "connection_id" + location = "US" + cloud_resource { } +} + +resource "google_bigquery_routine" "remote_function" { + dataset_id = google_bigquery_dataset.test.dataset_id + routine_id = "routine_id" + routine_type = "SCALAR_FUNCTION" + definition_body = "" + + return_type = "{\"typeKind\" : \"STRING\"}" + + remote_function_options { + endpoint = "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add" + connection = google_bigquery_connection.test.name + max_batching_rows = "10" + user_defined_context = { + "z": "1.5", + } + } +} +``` ## Argument Reference @@ -311,6 +343,11 @@ The following arguments are supported: Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is [documented below](#nested_spark_options). +* `remote_function_options` - + (Optional) + Remote function specific options. + Structure is [documented below](#nested_remote_function_options). + * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -390,6 +427,32 @@ The following arguments are supported: The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type. +The `remote_function_options` block supports: + +* `endpoint` - + (Optional) + Endpoint of the user-provided remote service, e.g. + `https://us-east1-my_gcf_project.cloudfunctions.net/remote_add` + +* `connection` - + (Optional) + Fully qualified name of the user-provided connection object which holds + the authentication information to send requests to the remote service. 
+ Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}" + +* `user_defined_context` - + (Optional) + User-defined context as a set of key/value pairs, which will be sent as function + invocation context together with batched arguments in the requests to the remote + service. The total number of bytes of keys and values must be less than 8KB. + An object containing a list of "key": value pairs. Example: + `{ "name": "wrench", "mass": "1.3kg", "count": "3" }`. + +* `max_batching_rows` - + (Optional) + Max number of rows in each batch sent to the remote service. If absent or if 0, + BigQuery dynamically decides the number of rows in a batch. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/certificate_manager_certificate.html.markdown b/website/docs/r/certificate_manager_certificate.html.markdown index 06207e1369b..13882dc583d 100644 --- a/website/docs/r/certificate_manager_certificate.html.markdown +++ b/website/docs/r/certificate_manager_certificate.html.markdown @@ -310,6 +310,35 @@ resource "google_certificate_manager_dns_authorization" "instance2" { domain = "subdomain2.hashicorptest.com" } ``` + +## Example Usage - Certificate Manager Google Managed Regional Certificate Dns Auth + + +```hcl +resource "google_certificate_manager_certificate" "default" { + name = "dns-cert" + description = "regional managed certs" + location = "us-central1" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + ] + } +} +resource "google_certificate_manager_dns_authorization" "instance" { + name = "dns-auth" + location = "us-central1" + description = "The default dns" + domain = "subdomain.hashicorptest.com" +} +``` ## Argument Reference diff --git a/website/docs/r/certificate_manager_dns_authorization.html.markdown
b/website/docs/r/certificate_manager_dns_authorization.html.markdown index 5c8798bc133..50efd628095 100644 --- a/website/docs/r/certificate_manager_dns_authorization.html.markdown +++ b/website/docs/r/certificate_manager_dns_authorization.html.markdown @@ -34,7 +34,8 @@ DnsAuthorization represents a HTTP-reachable backend for a DnsAuthorization. ```hcl resource "google_certificate_manager_dns_authorization" "default" { name = "dns-auth" - description = "The default dnss" + location = "global" + description = "The default dns" domain = "subdomain.hashicorptest.com" } @@ -50,6 +51,23 @@ output "record_data_to_insert" { value = google_certificate_manager_dns_authorization.default.dns_resource_record.0.data } ``` + +## Example Usage - Certificate Manager Dns Authorization Regional + + +```hcl +resource "google_certificate_manager_dns_authorization" "default" { + name = "dns-auth" + location = "us-central1" + description = "regional dns" + type = "PER_PROJECT_RECORD" + domain = "subdomain.hashicorptest.com" +} +``` ## Argument Reference @@ -82,6 +100,20 @@ The following arguments are supported: **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. +* `type` - + (Optional) + type of DNS authorization. If unset during the resource creation, FIXED_RECORD will + be used for global resources, and PER_PROJECT_RECORD will be used for other locations. + FIXED_RECORD DNS authorization uses DNS-01 validation method + PER_PROJECT_RECORD DNS authorization allows for independent management + of Google-managed certificates with DNS authorization across multiple + projects. + Possible values are: `FIXED_RECORD`, `PER_PROJECT_RECORD`. + +* `location` - + (Optional) + The Certificate Manager location. If not specified, "global" is used. + * `project` - (Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used. @@ -90,7 +122,7 @@ The following arguments are supported: In addition to the arguments listed above, the following computed attributes are exported: -* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/dnsAuthorizations/{{name}}` +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}` * `dns_resource_record` - The structure describing the DNS Resource Record that needs to be added @@ -135,16 +167,16 @@ This resource provides the following DnsAuthorization can be imported using any of these accepted formats: -* `projects/{{project}}/locations/global/dnsAuthorizations/{{name}}` -* `{{project}}/{{name}}` -* `{{name}}` +* `projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DnsAuthorization using one of the formats above. For example: ```tf import { - id = "projects/{{project}}/locations/global/dnsAuthorizations/{{name}}" + id = "projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}}" to = google_certificate_manager_dns_authorization.default } ``` @@ -152,9 +184,9 @@ import { When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), DnsAuthorization can be imported using one of the formats above. 
For example: ``` -$ terraform import google_certificate_manager_dns_authorization.default projects/{{project}}/locations/global/dnsAuthorizations/{{name}} -$ terraform import google_certificate_manager_dns_authorization.default {{project}}/{{name}} -$ terraform import google_certificate_manager_dns_authorization.default {{name}} +$ terraform import google_certificate_manager_dns_authorization.default projects/{{project}}/locations/{{location}}/dnsAuthorizations/{{name}} +$ terraform import google_certificate_manager_dns_authorization.default {{project}}/{{location}}/{{name}} +$ terraform import google_certificate_manager_dns_authorization.default {{location}}/{{name}} ``` ## User Project Overrides diff --git a/website/docs/r/cloud_run_v2_service.html.markdown b/website/docs/r/cloud_run_v2_service.html.markdown index 447686960cd..e308ab7a26f 100644 --- a/website/docs/r/cloud_run_v2_service.html.markdown +++ b/website/docs/r/cloud_run_v2_service.html.markdown @@ -714,7 +714,8 @@ The following arguments are supported: * `cpu_idle` - (Optional) - Determines whether CPU should be throttled or not outside of requests. + Determines whether CPU is only allocated during requests. True by default if the parent `resources` field is not set. However, if + `resources` is set, this field must be explicitly set to true to preserve the default behavior. * `startup_cpu_boost` - (Optional) @@ -1062,6 +1063,11 @@ The following arguments are supported: One or more custom audiences that you want this service to support. Specify each custom audience as the full URL in a string. The custom audiences are encoded in the token and used to authenticate requests. For more information, see https://cloud.google.com/run/docs/configuring/custom-audiences. +* `scaling` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Scaling settings that apply to the whole service + Structure is [documented below](#nested_scaling). 
+ * `traffic` - (Optional) Specifies how to distribute traffic over a collection of Revisions belonging to the Service. If traffic is empty or not provided, defaults to 100% traffic to the latest Ready Revision. @@ -1081,6 +1087,12 @@ The following arguments are supported: (Optional) If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. +The `scaling` block supports: + +* `min_instance_count` - + (Optional) + Minimum number of instances for the service, to be divided among all revisions receiving traffic. + The `traffic` block supports: * `type` - diff --git a/website/docs/r/clouddeploy_automation.html.markdown b/website/docs/r/clouddeploy_automation.html.markdown index ee58a8a6998..4a05277b1be 100644 --- a/website/docs/r/clouddeploy_automation.html.markdown +++ b/website/docs/r/clouddeploy_automation.html.markdown @@ -21,8 +21,6 @@ description: |- An `Automation` enables the automation of manually driven actions for a Delivery Pipeline, which includes Release promotion amongst Targets, Rollout repair and Rollout deployment strategy advancement. -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
To get more information about Automation, see: @@ -35,7 +33,6 @@ To get more information about Automation, see: ```hcl resource "google_clouddeploy_automation" "b-automation" { - provider = google-beta name = "cd-automation" project = google_clouddeploy_delivery_pipeline.pipeline.project location = google_clouddeploy_delivery_pipeline.pipeline.location @@ -55,7 +52,6 @@ resource "google_clouddeploy_automation" "b-automation" { } resource "google_clouddeploy_delivery_pipeline" "pipeline" { - provider = google-beta name = "cd-pipeline" location = "us-central1" serial_pipeline { @@ -71,7 +67,6 @@ resource "google_clouddeploy_delivery_pipeline" "pipeline" { ```hcl resource "google_clouddeploy_automation" "f-automation" { - provider = google-beta name = "cd-automation" location = "us-central1" delivery_pipeline = google_clouddeploy_delivery_pipeline.pipeline.name @@ -112,7 +107,6 @@ resource "google_clouddeploy_automation" "f-automation" { } resource "google_clouddeploy_delivery_pipeline" "pipeline" { - provider = google-beta name = "cd-pipeline" location = "us-central1" serial_pipeline { diff --git a/website/docs/r/clouddeploy_custom_target_type_iam.html.markdown b/website/docs/r/clouddeploy_custom_target_type_iam.html.markdown new file mode 100644 index 00000000000..50185d09d23 --- /dev/null +++ b/website/docs/r/clouddeploy_custom_target_type_iam.html.markdown @@ -0,0 +1,154 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. 
+# +# ---------------------------------------------------------------------------- +subcategory: "Cloud Deploy" +description: |- + Collection of resources to manage IAM policy for Cloud Deploy CustomTargetType +--- + +# IAM policy for Cloud Deploy CustomTargetType +Three different resources help you manage your IAM policy for Cloud Deploy CustomTargetType. Each of these resources serves a different use case: + +* `google_clouddeploy_custom_target_type_iam_policy`: Authoritative. Sets the IAM policy for the customtargettype and replaces any existing policy already attached. +* `google_clouddeploy_custom_target_type_iam_binding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the customtargettype are preserved. +* `google_clouddeploy_custom_target_type_iam_member`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the customtargettype are preserved. + +A data source can be used to retrieve policy data in the event that you do not need creation + +* `google_clouddeploy_custom_target_type_iam_policy`: Retrieves the IAM policy for the customtargettype + +~> **Note:** `google_clouddeploy_custom_target_type_iam_policy` **cannot** be used in conjunction with `google_clouddeploy_custom_target_type_iam_binding` and `google_clouddeploy_custom_target_type_iam_member` or they will fight over what your policy should be. + +~> **Note:** `google_clouddeploy_custom_target_type_iam_binding` resources **can be** used in conjunction with `google_clouddeploy_custom_target_type_iam_member` resources **only if** they do not grant privilege to the same role.
+ + + + +## google\_clouddeploy\_custom\_target\_type\_iam\_policy + +```hcl +data "google_iam_policy" "admin" { + binding { + role = "roles/viewer" + members = [ + "user:jane@example.com", + ] + } +} + +resource "google_clouddeploy_custom_target_type_iam_policy" "policy" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + policy_data = data.google_iam_policy.admin.policy_data +} +``` + +## google\_clouddeploy\_custom\_target\_type\_iam\_binding + +```hcl +resource "google_clouddeploy_custom_target_type_iam_binding" "binding" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + role = "roles/viewer" + members = [ + "user:jane@example.com", + ] +} +``` + +## google\_clouddeploy\_custom\_target\_type\_iam\_member + +```hcl +resource "google_clouddeploy_custom_target_type_iam_member" "member" { + project = google_clouddeploy_custom_target_type.custom-target-type.project + location = google_clouddeploy_custom_target_type.custom-target-type.location + name = google_clouddeploy_custom_target_type.custom-target-type.name + role = "roles/viewer" + member = "user:jane@example.com" +} +``` + + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Used to find the parent resource to bind the IAM policy to +* `location` - (Required) The location of the source. Used to find the parent resource to bind the IAM policy to + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. 
If no project is provided in the parent identifier and no project is specified, the provider project is used. + +* `member/members` - (Required) Identities that will be granted the privilege in `role`. + Each entry can have one of the following values: + * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. + * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. + * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. + * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. + * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. + * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. + * **projectOwner:projectid**: Owners of the given project. For example, "projectOwner:my-example-project" + * **projectEditor:projectid**: Editors of the given project. For example, "projectEditor:my-example-project" + * **projectViewer:projectid**: Viewers of the given project. For example, "projectViewer:my-example-project" + +* `role` - (Required) The role that should be applied. Only one + `google_clouddeploy_custom_target_type_iam_binding` can be used per role. Note that custom roles must be of the format + `[projects|organizations]/{parent-name}/roles/{role-name}`. + +* `policy_data` - (Required only by `google_clouddeploy_custom_target_type_iam_policy`) The policy data generated by + a `google_iam_policy` data source. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are +exported: + +* `etag` - (Computed) The etag of the IAM policy. 
+
+## Import
+
+For all import syntaxes, the "resource in question" can take any of the following forms:
+
+* projects/{{project}}/locations/{{location}}/customTargetTypes/{{name}}
+* {{project}}/{{location}}/{{name}}
+* {{location}}/{{name}}
+* {{name}}
+
+Any variables not passed in the import command will be taken from the provider configuration.
+
+Cloud Deploy customtargettype IAM resources can be imported using the resource identifiers, role, and member.
+
+IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g.
+```
+$ terraform import google_clouddeploy_custom_target_type_iam_member.editor "projects/{{project}}/locations/{{location}}/customTargetTypes/{{custom_target_type}} roles/viewer user:jane@example.com"
+```
+
+IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g.
+```
+$ terraform import google_clouddeploy_custom_target_type_iam_binding.editor "projects/{{project}}/locations/{{location}}/customTargetTypes/{{custom_target_type}} roles/viewer"
+```
+
+IAM policy imports use the identifier of the resource in question, e.g.
+```
+$ terraform import google_clouddeploy_custom_target_type_iam_policy.editor projects/{{project}}/locations/{{location}}/customTargetTypes/{{custom_target_type}}
+```
+
+-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the
+ full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
+
+## User Project Overrides
+
+This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override).
diff --git a/website/docs/r/clouddeploy_target_iam.html.markdown b/website/docs/r/clouddeploy_target_iam.html.markdown
new file mode 100644
index 00000000000..86a97ca65f2
--- /dev/null
+++ b/website/docs/r/clouddeploy_target_iam.html.markdown
@@ -0,0 +1,153 @@
+---
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: MMv1 ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file in
+# .github/CONTRIBUTING.md.
+#
+# ----------------------------------------------------------------------------
+subcategory: "Cloud Deploy"
+description: |-
+  Collection of resources to manage IAM policy for Cloud Deploy Target
+---
+
+# IAM policy for Cloud Deploy Target
+Three different resources help you manage your IAM policy for Cloud Deploy Target. Each of these resources serves a different use case:
+
+* `google_clouddeploy_target_iam_policy`: Authoritative. Sets the IAM policy for the target and replaces any existing policy already attached.
+* `google_clouddeploy_target_iam_binding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the target are preserved.
+* `google_clouddeploy_target_iam_member`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the target are preserved.
+
+A data source can be used to retrieve policy data in the event that you do not need creation
+
+* `google_clouddeploy_target_iam_policy`: Retrieves the IAM policy for the target
+
+~> **Note:** `google_clouddeploy_target_iam_policy` **cannot** be used in conjunction with `google_clouddeploy_target_iam_binding` and `google_clouddeploy_target_iam_member` or they will fight over what your policy should be.
+ +~> **Note:** `google_clouddeploy_target_iam_binding` resources **can be** used in conjunction with `google_clouddeploy_target_iam_member` resources **only if** they do not grant privilege to the same role. + + + + +## google\_clouddeploy\_target\_iam\_policy + +```hcl +data "google_iam_policy" "admin" { + binding { + role = "roles/viewer" + members = [ + "user:jane@example.com", + ] + } +} + +resource "google_clouddeploy_target_iam_policy" "policy" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + policy_data = data.google_iam_policy.admin.policy_data +} +``` + +## google\_clouddeploy\_target\_iam\_binding + +```hcl +resource "google_clouddeploy_target_iam_binding" "binding" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + role = "roles/viewer" + members = [ + "user:jane@example.com", + ] +} +``` + +## google\_clouddeploy\_target\_iam\_member + +```hcl +resource "google_clouddeploy_target_iam_member" "member" { + project = google_clouddeploy_target.default.project + location = google_clouddeploy_target.default.location + name = google_clouddeploy_target.default.name + role = "roles/viewer" + member = "user:jane@example.com" +} +``` + + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Used to find the parent resource to bind the IAM policy to + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + +* `member/members` - (Required) Identities that will be granted the privilege in `role`. 
+ Each entry can have one of the following values: + * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. + * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. + * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. + * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. + * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. + * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. + * **projectOwner:projectid**: Owners of the given project. For example, "projectOwner:my-example-project" + * **projectEditor:projectid**: Editors of the given project. For example, "projectEditor:my-example-project" + * **projectViewer:projectid**: Viewers of the given project. For example, "projectViewer:my-example-project" + +* `role` - (Required) The role that should be applied. Only one + `google_clouddeploy_target_iam_binding` can be used per role. Note that custom roles must be of the format + `[projects|organizations]/{parent-name}/roles/{role-name}`. + +* `policy_data` - (Required only by `google_clouddeploy_target_iam_policy`) The policy data generated by + a `google_iam_policy` data source. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are +exported: + +* `etag` - (Computed) The etag of the IAM policy. 
+
+## Import
+
+For all import syntaxes, the "resource in question" can take any of the following forms:
+
+* projects/{{project}}/locations/{{location}}/targets/{{name}}
+* {{project}}/{{location}}/{{name}}
+* {{location}}/{{name}}
+* {{name}}
+
+Any variables not passed in the import command will be taken from the provider configuration.
+
+Cloud Deploy target IAM resources can be imported using the resource identifiers, role, and member.
+
+IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g.
+```
+$ terraform import google_clouddeploy_target_iam_member.editor "projects/{{project}}/locations/{{location}}/targets/{{target}} roles/viewer user:jane@example.com"
+```
+
+IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g.
+```
+$ terraform import google_clouddeploy_target_iam_binding.editor "projects/{{project}}/locations/{{location}}/targets/{{target}} roles/viewer"
+```
+
+IAM policy imports use the identifier of the resource in question, e.g.
+```
+$ terraform import google_clouddeploy_target_iam_policy.editor projects/{{project}}/locations/{{location}}/targets/{{target}}
+```
+
+-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the
+ full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
+
+## User Project Overrides
+
+This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override).
diff --git a/website/docs/r/cloudfunctions_function.html.markdown b/website/docs/r/cloudfunctions_function.html.markdown index 8a8b26b8709..b24afb98853 100644 --- a/website/docs/r/cloudfunctions_function.html.markdown +++ b/website/docs/r/cloudfunctions_function.html.markdown @@ -149,6 +149,8 @@ Please refer to the field 'effective_labels' for all of the labels present on th * `build_environment_variables` - (Optional) A set of key/value environment variable pairs available during build time. +* `build_worker_pool` - (Optional) Name of the Cloud Build Custom Worker Pool that should be used to build the function. + * `vpc_connector` - (Optional) The VPC Network Connector that this cloud function can connect to. It should be set up as fully-qualified URI. The format of this field is `projects/*/locations/*/connectors/*`. * `vpc_connector_egress_settings` - (Optional) The egress settings for the connector, controlling what traffic is diverted through it. Allowed values are `ALL_TRAFFIC` and `PRIVATE_RANGES_ONLY`. Defaults to `PRIVATE_RANGES_ONLY`. If unset, this field preserves the previously set value. @@ -160,9 +162,9 @@ Please refer to the field 'effective_labels' for all of the labels present on th * `source_repository` - (Optional) Represents parameters related to source repository where a function is hosted. Cannot be set alongside `source_archive_bucket` or `source_archive_object`. Structure is [documented below](#nested_source_repository). It must match the pattern `projects/{project}/locations/{location}/repositories/{repository}`.* -* `docker_registry` - (Optional) Docker Registry to use for storing the function's Docker images. Allowed values are CONTAINER_REGISTRY (default) and ARTIFACT_REGISTRY. +* `docker_registry` - (Optional) Docker Registry to use for storing the function's Docker images. Allowed values are ARTIFACT_REGISTRY (default) and CONTAINER_REGISTRY. 
-* `docker_repository` - (Optional) User managed repository created in Artifact Registry optionally with a customer managed encryption key. If specified, deployments will use Artifact Registry. This is the repository to which the function docker image will be pushed after it is built by Cloud Build. If unspecified, Container Registry will be used by default, unless specified otherwise by other means. +* `docker_repository` - (Optional) User-managed repository created in Artifact Registry to which the function's Docker image will be pushed after it is built by Cloud Build. May optionally be encrypted with a customer-managed encryption key (CMEK). If unspecified and `docker_registry` is not explicitly set to `CONTAINER_REGISTRY`, GCF will create and use a default Artifact Registry repository named 'gcf-artifacts' in the region. * `kms_key_name` - (Optional) Resource name of a KMS crypto key (managed by the user) used to encrypt/decrypt function resources. It must match the pattern `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`. If specified, you must also provide an artifact registry repository using the `docker_repository` field that was created with the same KMS crypto key. Before deploying, please complete all pre-requisites described in https://cloud.google.com/functions/docs/securing/cmek#granting_service_accounts_access_to_the_key diff --git a/website/docs/r/composer_environment.html.markdown b/website/docs/r/composer_environment.html.markdown index 61e961b78b5..81865ec3790 100644 --- a/website/docs/r/composer_environment.html.markdown +++ b/website/docs/r/composer_environment.html.markdown @@ -229,7 +229,7 @@ The following arguments are supported: * `config` - (Optional) - Configuration parameters for this environment Structure is [documented below](#nested_config). + Configuration parameters for this environment Structure is [documented below](#nested_config_c1). 
* `labels` - (Optional) @@ -260,7 +260,7 @@ The following arguments are supported: (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. -The `config` block supports: +The `config` block supports: * `node_count` - (Optional, Cloud Composer 1 only) @@ -268,28 +268,19 @@ The following arguments are supported: * `node_config` - (Optional) - The configuration used for the Kubernetes Engine cluster. Structure is [documented below](#nested_node_config). + The configuration used for the Kubernetes Engine cluster. Structure is [documented below](#nested_node_config_c1). * `recovery_config` - (Optional, Cloud Composer 2 only) - The configuration settings for recovery. Structure is [documented below](#nested_recovery_config). + The configuration settings for recovery. Structure is [documented below](#nested_recovery_config_c1). * `software_config` - (Optional) - The configuration settings for software inside the environment. Structure is [documented below](#nested_software_config). + The configuration settings for software inside the environment. Structure is [documented below](#nested_software_config_c1). * `private_environment_config` - (Optional) - The configuration used for the Private IP Cloud Composer environment. Structure is [documented below](#nested_private_environment_config). - -* `enable_private_environment` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) - If true, a private Composer environment will be created. - -* `enable_private_builds_only` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) - If true, builds performed during operations that install Python packages have only private connectivity to Google services. - If false, the builds also have access to the internet. + The configuration used for the Private IP Cloud Composer environment. 
Structure is [documented below](#nested_private_environment_config_c1). * `web_server_network_access_control` - The network-level access control policy for the Airflow web server. @@ -319,9 +310,9 @@ The following arguments are supported: master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. Structure is - [documented below](#nested_master_authorized_networks_config). + [documented below](#nested_master_authorized_networks_config_c1). -The `node_config` block supports: +The `node_config` block supports: * `zone` - (Optional, Cloud Composer 1 only) @@ -382,7 +373,7 @@ The following arguments are supported: * `ip_allocation_policy` - (Optional) Configuration for controlling how IPs are allocated in the GKE cluster. - Structure is [documented below](#nested_ip_allocation_policy). + Structure is [documented below](#nested_ip_allocation_policy_c1). Cannot be updated. * `max_pods_per_node` - @@ -401,7 +392,7 @@ The following arguments are supported: all destination addresses, except between pods traffic. See the [documentation](https://cloud.google.com/composer/docs/enable-ip-masquerade-agent). -The `software_config` block supports: +The `software_config` block supports: * `airflow_config_overrides` - (Optional) Apache Airflow configuration properties to override. Property keys contain the section and property names, @@ -444,7 +435,7 @@ The following arguments are supported: ``` * `image_version` - - (Optional in Cloud Composer 1, required in Cloud Composer 2) +(Required) In Composer 1, use a specific Composer 1 version in this parameter. If omitted, the default is the latest version of Composer 2. The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. 
It must match the regular expression @@ -465,12 +456,8 @@ The following arguments are supported: (Optional, Cloud Composer 1 with Airflow 2 only) The number of schedulers for Airflow. -* `web_server_plugins_mode` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) - Web server plugins configuration. Should be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. - -See [documentation](https://cloud.google.com/composer/docs/how-to/managing/configuring-private-ip) for setting up private environments. The `private_environment_config` block supports: +See [documentation](https://cloud.google.com/composer/docs/how-to/managing/configuring-private-ip) for setting up private environments. The `private_environment_config` block supports: * `connection_type` - (Optional, Cloud Composer 2 only) @@ -506,9 +493,9 @@ See [documentation](https://cloud.google.com/composer/docs/how-to/managing/confi The `web_server_network_access_control` supports: * `allowed_ip_range` - - A collection of allowed IP ranges with descriptions. Structure is [documented below](#nested_allowed_ip_range). + A collection of allowed IP ranges with descriptions. Structure is [documented below](#nested_allowed_ip_range_c1). -The `allowed_ip_range` supports: +The `allowed_ip_range` supports: * `value` - (Required) @@ -521,7 +508,7 @@ The `web_server_network_access_control` supports: (Optional) A description of this ip range. -The `ip_allocation_policy` block supports: +The `ip_allocation_policy` block supports: * `use_ip_aliases` - (Optional, Cloud Composer 1 only) @@ -560,7 +547,7 @@ The `web_server_network_access_control` supports: (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either `services_secondary_range_name` or `services_ipv4_cidr_block` but not both. 
-The `database_config` block supports: +The `database_config` block supports: * `machine_type` - (Optional) @@ -571,7 +558,7 @@ The `web_server_network_access_control` supports: (Optional) Preferred Cloud SQL database zone. -The `web_server_config` block supports: +The `web_server_config` block supports: * `machine_type` - (Required) @@ -580,7 +567,7 @@ The `web_server_network_access_control` supports: Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values. -The `encryption_config` block supports: +The `encryption_config` block supports: * `kms_key_name` - (Required) @@ -588,7 +575,7 @@ The `web_server_network_access_control` supports: be the fully qualified resource name, i.e. projects/project-id/locations/location/keyRings/keyring/cryptoKeys/key. Cannot be updated. -The `maintenance_window` block supports: +The `maintenance_window` block supports: * `start_time` - (Required) Start time of the first recurrence of the maintenance window. @@ -604,15 +591,15 @@ The `web_server_network_access_control` supports: The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'. -The `master_authorized_networks_config` block supports: +The `master_authorized_networks_config` block supports: * `enabled` - (Required) Whether or not master authorized networks is enabled. * `cidr_blocks` - - `cidr_blocks `define up to 50 external networks that could access Kubernetes master through HTTPS. Structure is [documented below](#nested_cidr_blocks). + `cidr_blocks `define up to 50 external networks that could access Kubernetes master through HTTPS. Structure is [documented below](#nested_cidr_blocks_c1). -The `cidr_blocks` supports: +The `cidr_blocks` supports: * `display_name` - (Optional) @@ -632,7 +619,7 @@ The following arguments are supported: * `config` - (Optional) - Configuration parameters for this environment. 
Structure is documented below. + Configuration parameters for this environment. Structure is [documented below](#nested_config_c2). * `labels` - (Optional) @@ -656,24 +643,23 @@ The following arguments are supported: * `storage_config` - (Optional) - Configuration options for storage used by Composer environment. Structure is documented below. + Configuration options for storage used by Composer environment. Structure is [documented below](#nested_storage_config_c2). -The `config` block supports: +The `config` block supports: * `node_config` - (Optional) - The configuration used for the Kubernetes Engine cluster. Structure is documented below. + The configuration used for the Kubernetes Engine cluster. Structure is [documented below](#nested_node_config_c2). * `software_config` - (Optional) The configuration settings for software (Airflow) inside the environment. Structure is - documented below. + [documented below](#nested_software_config_c2). * `private_environment_config` - (Optional) - The configuration used for the Private IP Cloud Composer environment. Structure is documented - below. + The configuration used for the Private IP Cloud Composer environment. Structure is [documented below](#nested_private_environment_config_c2). * `encryption_config` - (Optional) @@ -685,12 +671,12 @@ The `config` block supports: The configuration settings for Cloud Composer maintenance windows. * `workloads_config` - - (Optional, Cloud Composer 2 only) + (Optional) The Kubernetes workloads configuration for GKE cluster associated with the Cloud Composer environment. * `environment_size` - - (Optional, Cloud Composer 2 only) + (Optional) The environment size controls the performance parameters of the managed Cloud Composer infrastructure that includes the Airflow database. 
Values for environment size are `ENVIRONMENT_SIZE_SMALL`, `ENVIRONMENT_SIZE_MEDIUM`, @@ -709,20 +695,20 @@ The `config` block supports: master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. Structure is - documented below. + [documented below](#nested_master_authorized_networks_config_c1). * `data_retention_config` - (Optional, Cloud Composer 2.0.23 or newer only) Configuration setting for airflow data rentention mechanism. Structure is - [documented below](#nested_data_retention_config). + [documented below](#nested_data_retention_config_c2). -The `data_retention_config` block supports: +The `data_retention_config` block supports: * `task_logs_retention_config` - (Optional) The configuration setting for Task Logs. Structure is - [documented below](#nested_task_logs_retention_config). + [documented below](#nested_task_logs_retention_config_c2). -The `task_logs_retention_config` block supports: +The `task_logs_retention_config` block supports: * `storage_mode` - (Optional) The mode of storage for Airflow workers task logs. Values for storage mode are @@ -730,14 +716,14 @@ The `config` block supports: `CLOUD_LOGGING_AND_CLOUD_STORAGE` to store logs in cloud logging and cloud storage. -The `storage_config` block supports: +The `storage_config` block supports: * `bucket` - (Required) Name of an existing Cloud Storage bucket to be used by the environment. -The `node_config` block supports: +The `node_config` block supports: * `network` - (Optional) @@ -773,7 +759,7 @@ The `node_config` block supports: * `ip_allocation_policy` - (Optional) Configuration for controlling how IPs are allocated in the GKE cluster. - Structure is documented below. + Structure is [documented below](#nested_ip_allocation_policy_c2). Cannot be updated. 
* `enable_ip_masq_agent` - @@ -783,12 +769,7 @@ The `node_config` block supports: packets from node IP addresses instead of Pod IP addresses See the [documentation](https://cloud.google.com/composer/docs/enable-ip-masquerade-agent). -* `composer_internal_ipv4_cidr_block` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) - At least /20 IPv4 cidr range that will be used by Composer internal components. - Cannot be updated. - -The `software_config` block supports: +The `software_config` block supports: * `airflow_config_overrides` - (Optional) Apache Airflow configuration properties to override. Property keys contain the section and property names, @@ -831,10 +812,8 @@ The `software_config` block supports: ``` * `image_version` - - (Required in Cloud Composer 2, optional in Cloud Composer 1) +(Optional) If omitted, the default is the latest version of Composer 2. - **In Cloud Composer 2, you must specify an image with Cloud Composer 2**. Otherwise, the default image for Cloud Composer 1 is used. For more information about Cloud Composer images, see - [Cloud Composer version list](https://cloud.google.com/composer/docs/concepts/versioning/composer-versions). The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression @@ -853,14 +832,14 @@ The `software_config` block supports: (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and newer) The configuration for Cloud Data Lineage integration. Structure is - [documented below](#nested_cloud_data_lineage_integration). + [documented below](#nested_cloud_data_lineage_integration_c2). 
-The `cloud_data_lineage_integration` block supports: +The `cloud_data_lineage_integration` block supports: * `enabled` - (Required) Whether or not Cloud Data Lineage integration is enabled. -See [documentation](https://cloud.google.com/composer/docs/how-to/managing/configuring-private-ip) for setting up private environments. The `private_environment_config` block supports: +See [documentation](https://cloud.google.com/composer/docs/how-to/managing/configuring-private-ip) for setting up private environments. The `private_environment_config` block supports: * `enable_private_endpoint` - If true, access to the public endpoint of the GKE cluster is denied. @@ -894,7 +873,7 @@ See [documentation](https://cloud.google.com/composer/docs/how-to/managing/confi versions `composer-2.*.*-airflow-*.*.*` and newer. -The `ip_allocation_policy` block supports: +The `ip_allocation_policy` block supports: * `cluster_secondary_range_name` - (Optional) @@ -951,7 +930,7 @@ The `ip_allocation_policy` block supports: The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'. -The `recovery_config` block supports: +The `recovery_config` block supports: * `scheduled_snapshots_config` - (Optional) @@ -993,6 +972,345 @@ The `workloads_config` block supports: (Optional) Configuration for resources used by Airflow workers. +The `scheduler` block supports: + +* `cpu` - + (Optional) + The number of CPUs for a single Airflow scheduler. + +* `memory_gb` - + (Optional) + The amount of memory (GB) for a single Airflow scheduler. + +* `storage_gb` - + (Optional) + The amount of storage (GB) for a single Airflow scheduler. + +* `count` - + (Optional) + The number of schedulers. + +The `triggerer` block supports: + +* `cpu` - + (Required) + The number of CPUs for a single Airflow triggerer. + +* `memory_gb` - + (Required) + The amount of memory (GB) for a single Airflow triggerer. 
+ +* `count` - + (Required) + The number of Airflow triggerers. + +The `web_server` block supports: + +* `cpu` - + (Optional) + The number of CPUs for the Airflow web server. + +* `memory_gb` - + (Optional) + The amount of memory (GB) for the Airflow web server. + +* `storage_gb` - + (Optional) + The amount of storage (GB) for the Airflow web server. + +The `worker` block supports: + +* `cpu` - + (Optional) + The number of CPUs for a single Airflow worker. + +* `memory_gb` - + (Optional) + The amount of memory (GB) for a single Airflow worker. + +* `storage_gb` + (Optional) + The amount of storage (GB) for a single Airflow worker. + +* `min_count` - + (Optional) + The minimum number of Airflow workers that the environment can run. The number of workers in the + environment does not go above this number, even if a lower number of workers can handle the load. + +* `max_count` - + (Optional) + The maximum number of Airflow workers that the environment can run. The number of workers in the + environment does not go above this number, even if a higher number of workers is required to + handle the load. + + +## Argument Reference - Cloud Composer 3 + +**Please note: This documentation corresponds to Composer 3, which is not yet released.** + +The following arguments are supported: + +* `name` - + (Required) + Name of the environment + +* `config` - + (Optional) + Configuration parameters for this environment. Structure is [documented below](#nested_config_c3). + +* `labels` - + (Optional) + User-defined labels for this environment. The labels map can contain + no more than 64 entries. Entries of the labels map are UTF8 strings + that comply with the following restrictions: + Label keys must be between 1 and 63 characters long and must conform + to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. + Label values must be between 0 and 63 characters long and must + conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. 
+ No more than 64 labels can be associated with a given environment. + Both keys and values must be <= 128 bytes in size. + +* `region` - + (Optional) + The location or Compute Engine region for the environment. + +* `project` - + (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + +* `storage_config` - + (Optional) + Configuration options for storage used by Composer environment. Structure is [documented below](#nested_storage_config_c3). + + +The `config` block supports: + +* `node_config` - + (Optional) + The configuration used for the Kubernetes Engine cluster. Structure is [documented below](#nested_node_config_c3). + +* `software_config` - + (Optional) + The configuration settings for software (Airflow) inside the environment. Structure is [documented below](#nested_software_config_c3). + +* `enable_private_environment` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) + If true, a private Composer environment will be created. + +* `enable_private_builds_only` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) + If true, builds performed during operations that install Python packages have only private connectivity to Google services. + If false, the builds also have access to the internet. + +* `encryption_config` - + (Optional) + The encryption options for the Cloud Composer environment and its + dependencies. + +* `maintenance_window` - + (Optional) + The configuration settings for Cloud Composer maintenance windows. + +* `workloads_config` - + (Optional) + The Kubernetes workloads configuration for GKE cluster associated with the + Cloud Composer environment. + +* `environment_size` - + (Optional) + The environment size controls the performance parameters of the managed + Cloud Composer infrastructure that includes the Airflow database. 
Values for
+  environment size are `ENVIRONMENT_SIZE_SMALL`, `ENVIRONMENT_SIZE_MEDIUM`,
+  and `ENVIRONMENT_SIZE_LARGE`.
+
+* `data_retention_config` -
+  (Optional, Cloud Composer 2.0.23 or later only)
+  Configuration setting for Airflow database retention mechanism. Structure is
+  [documented below](#nested_data_retention_config_c3).
+
+The `data_retention_config` block supports:
+* `task_logs_retention_config` -
+  (Optional)
+  The configuration setting for Airflow task logs. Structure is
+  [documented below](#nested_task_logs_retention_config_c3).
+
+The `task_logs_retention_config` block supports:
+* `storage_mode` -
+  (Optional)
+  The mode of storage for Airflow task logs. Values for storage mode are
+  `CLOUD_LOGGING_ONLY` to only store logs in cloud logging and
+  `CLOUD_LOGGING_AND_CLOUD_STORAGE` to store logs in cloud logging and cloud storage.
+
+
+The `storage_config` block supports:
+
+* `bucket` -
+  (Required)
+  Name of an existing Cloud Storage bucket to be used by the environment.
+
+
+The `node_config` block supports:
+
+* `network` -
+  (Optional)
+  The Compute Engine network to be used for machine
+  communications, specified as a self-link, relative resource name
+  (for example "projects/{project}/global/networks/{network}"), or by name.
+
+  The network must belong to the environment's project. If unspecified, the "default" network ID in the environment's
+  project is used. If a Custom Subnet Network is provided, subnetwork must also be provided.
+
+* `subnetwork` -
+  (Optional)
+  The Compute Engine subnetwork to be used for machine
+  communications, specified as a self-link, relative resource name (for example,
+  "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. If subnetwork is provided,
+  network must also be provided and the subnetwork must belong to the enclosing environment's project and region.
+ +* `composer_network_attachment` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) + PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment + and point Cloud Composer environment to use. It is possible to share network attachment among many environments, + provided enough IP addresses are available. + +* `service_account` - + (Optional) + The Google Cloud Platform Service Account to be used by the + node VMs. If a service account is not specified, the "default" + Compute Engine service account is used. Cannot be updated. If given, + note that the service account must have `roles/composer.worker` + for any GCP resources created under the Cloud Composer Environment. + +* `tags` - + (Optional) + The list of instance tags applied to all node VMs. Tags are + used to identify valid sources or targets for network + firewalls. Each tag within the list must comply with RFC1035. + Cannot be updated. + +* `composer_internal_ipv4_cidr_block` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) + /20 IPv4 cidr range that will be used by Composer internal components. + Cannot be updated. + +The `software_config` block supports: + +* `airflow_config_overrides` - + (Optional) Apache Airflow configuration properties to override. Property keys contain the section and property names, + separated by a hyphen, for example "core-dags_are_paused_at_creation". + + Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). + The property name must not be empty and cannot contain "=" or ";". Section and property names cannot contain + characters: "." Apache Airflow configuration property names must be written in snake_case. Property values can + contain any character, and can be written in any lower/upper case format. 
Certain Apache Airflow configuration + property values are [blacklisted](https://cloud.google.com/composer/docs/concepts/airflow-configurations#airflow_configuration_blacklists), + and cannot be overridden. + +* `pypi_packages` - + (Optional) + Custom Python Package Index (PyPI) packages to be installed + in the environment. Keys refer to the lowercase package name (e.g. "numpy"). Values are the lowercase extras and + version specifier (e.g. "==1.12.0", "[devel,gcp_api]", "[devel]>=1.8.2, <1.9.2"). To specify a package without + pinning it to a version specifier, use the empty string as the value. + +* `env_variables` - + (Optional) + Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. + Environment variable names must match the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`. + They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression + `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: + ``` + AIRFLOW_HOME + C_FORCE_ROOT + CONTAINER_NAME + DAGS_FOLDER + GCP_PROJECT + GCS_BUCKET + GKE_CLUSTER_NAME + SQL_DATABASE + SQL_INSTANCE + SQL_PASSWORD + SQL_PROJECT + SQL_REGION + SQL_USER + ``` + +* `image_version` - + (Required) If omitted, the default is the latest version of Composer 2. + + In Cloud Composer 3, you can only specify 3 in the Cloud Composer portion of the image version. Example: composer-3-airflow-x.y.z-build.t. + + The Apache Airflow portion of the image version is a full semantic version that points to one of the + supported Apache Airflow versions, or an alias in the form of only major, major.minor or major.minor.patch versions specified. + Like in Composer 1 and 2, a given Airflow version is released multiple times in Composer, with different patches + and versions of dependencies. 
To distinguish between these versions in Composer 3, you can optionally specify a + build number to pin to a specific Airflow release. + Example: composer-3-airflow-2.6.3-build.4. + + The image version in Composer 3 must match the regular expression: + `composer-(([0-9]+)(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-(([0-9]+)((\.[0-9]+)(\.[0-9]+)?)?(-build\.[0-9]+)?)` + Example: composer-3-airflow-2.6.3-build.4 + + **Important**: In-place upgrade for Composer 3 is not yet supported. + +* `cloud_data_lineage_integration` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), + Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and later) + The configuration for Cloud Data Lineage integration. Structure is + [documented below](#nested_cloud_data_lineage_integration_c3). + +* `web_server_plugins_mode` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) + Web server plugins configuration. Can be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. + +The `cloud_data_lineage_integration` block supports: +* `enabled` - + (Required) + Whether or not Cloud Data Lineage integration is enabled. + +The `encryption_config` block supports: + +* `kms_key_name` - + (Required) + Customer-managed Encryption Key available through Google's Key Management Service. It must + be the fully qualified resource name, + i.e. projects/project-id/locations/location/keyRings/keyring/cryptoKeys/key. Cannot be updated. + +The `maintenance_window` block supports: + +* `start_time` - + (Required) + Start time of the first recurrence of the maintenance window. + +* `end_time` - + (Required) + Maintenance window end time. It is used only to calculate the duration of the maintenance window. + The value for end-time must be in the future, relative to 'start_time'. + +* `recurrence` - + (Required) + Maintenance window recurrence. 
Format is a subset of RFC-5545 (https://tools.ietf.org/html/rfc5545) 'RRULE'. + The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. + Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'. + +The `workloads_config` block supports: + +* `scheduler` - + (Optional) + Configuration for resources used by Airflow scheduler. + +* `triggerer` - + (Optional) + Configuration for resources used by Airflow triggerer. + +* `web_server` - + (Optional) + Configuration for resources used by Airflow web server. + +* `worker` - + (Optional) + Configuration for resources used by Airflow workers. + * `dag_processor` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html), Cloud Composer 3 only) Configuration for resources used by DAG processor. diff --git a/website/docs/r/compute_instance_group_manager.html.markdown b/website/docs/r/compute_instance_group_manager.html.markdown index f6dead3b438..834633b6b6d 100644 --- a/website/docs/r/compute_instance_group_manager.html.markdown +++ b/website/docs/r/compute_instance_group_manager.html.markdown @@ -153,7 +153,7 @@ The following arguments are supported: * `auto_healing_policies` - (Optional) The autohealing policies for this managed instance group. You can specify only one value. Structure is [documented below](#nested_auto_healing_policies). For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups). -* `all_instances_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) +* `all_instances_config` - (Optional) Properties to set on all instances in the group. After setting allInstancesConfig on the group, you must update the group's instances to apply the configuration. 
@@ -205,12 +205,13 @@ update_policy { ```hcl instance_lifecycle_policy { - force_update_on_repair = "YES" + force_update_on_repair = "YES" + default_action_on_failure = "DO_NOTHING" } ``` -* `force_update_on_repair` - (Optional, (https://terraform.io/docs/providers/google/guides/provider_versions.html)), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type. - +* `force_update_on_repair` - (Optional), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type. +* `default_action_on_failure` - (Optional), Default behavior for all instance or health check failures. Valid options are: `REPAIR`, `DO_NOTHING`. If `DO_NOTHING` then instances will not be repaired. If `REPAIR` (default), then failed instances will be repaired. - - - The `all_instances_config` block supports: @@ -226,9 +227,9 @@ all_instances_config { } ``` -* `metadata` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)), The metadata key-value pairs that you want to patch onto the instance. For more information, see [Project and instance metadata](https://cloud.google.com/compute/docs/metadata#project_and_instance_metadata). +* `metadata` - (Optional), The metadata key-value pairs that you want to patch onto the instance. 
For more information, see [Project and instance metadata](https://cloud.google.com/compute/docs/metadata#project_and_instance_metadata). -* `labels` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)), The label key-value pairs that you want to patch onto the instance. +* `labels` - (Optional), The label key-value pairs that you want to patch onto the instance. - - - diff --git a/website/docs/r/compute_region_autoscaler.html.markdown b/website/docs/r/compute_region_autoscaler.html.markdown index 43cdf51e0c6..847ea2b7ac3 100644 --- a/website/docs/r/compute_region_autoscaler.html.markdown +++ b/website/docs/r/compute_region_autoscaler.html.markdown @@ -287,7 +287,7 @@ The following arguments are supported: The metric must have a value type of INT64 or DOUBLE. * `single_instance_assignment` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + (Optional) If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. @@ -321,7 +321,7 @@ The following arguments are supported: Possible values are: `GAUGE`, `DELTA_PER_SECOND`, `DELTA_PER_MINUTE`. * `filter` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + (Optional) A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. 
This filter is used to select a specific TimeSeries for diff --git a/website/docs/r/compute_region_instance_group_manager.html.markdown b/website/docs/r/compute_region_instance_group_manager.html.markdown index 0a16ac30b14..8dfbf3ee9a9 100644 --- a/website/docs/r/compute_region_instance_group_manager.html.markdown +++ b/website/docs/r/compute_region_instance_group_manager.html.markdown @@ -155,7 +155,7 @@ The following arguments are supported: * `auto_healing_policies` - (Optional) The autohealing policies for this managed instance group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups). -* `all_instances_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) +* `all_instances_config` - (Optional) Properties to set on all instances in the group. After setting allInstancesConfig on the group, you must update the group's instances to apply the configuration. @@ -215,11 +215,14 @@ update_policy { ```hcl instance_lifecycle_policy { - force_update_on_repair = "YES" + force_update_on_repair = "YES" + default_action_on_failure = "DO_NOTHING" } ``` -* `force_update_on_repair` - (Optional, (https://terraform.io/docs/providers/google/guides/provider_versions.html)), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type. +* `force_update_on_repair` - (Optional), Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: `YES`, `NO`. 
If `YES` and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If `NO` (default), then updates are applied in accordance with the group's update policy type. +* `default_action_on_failure` - (Optional), Default behavior for all instance or health check failures. Valid options are: `REPAIR`, `DO_NOTHING`. If `DO_NOTHING` then instances will not be repaired. If `REPAIR` (default), then failed instances will be repaired. + - - - The `all_instances_config` block supports: @@ -235,9 +238,9 @@ all_instances_config { } ``` -* `metadata` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)), The metadata key-value pairs that you want to patch onto the instance. For more information, see [Project and instance metadata](https://cloud.google.com/compute/docs/metadata#project_and_instance_metadata). +* `metadata` - (Optional), The metadata key-value pairs that you want to patch onto the instance. For more information, see [Project and instance metadata](https://cloud.google.com/compute/docs/metadata#project_and_instance_metadata). -* `labels` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)), The label key-value pairs that you want to patch onto the instance. +* `labels` - (Optional), The label key-value pairs that you want to patch onto the instance. 
- - - diff --git a/website/docs/r/compute_region_network_endpoint_group.html.markdown b/website/docs/r/compute_region_network_endpoint_group.html.markdown index 18a41f98145..74d423b65e8 100644 --- a/website/docs/r/compute_region_network_endpoint_group.html.markdown +++ b/website/docs/r/compute_region_network_endpoint_group.html.markdown @@ -193,6 +193,24 @@ resource "google_storage_bucket_object" "appengine_neg" { source = "./test-fixtures/hello-world.zip" } ``` + +## Example Usage - Region Network Endpoint Group Appengine Empty + + +```hcl +// App Engine Example +resource "google_compute_region_network_endpoint_group" "appengine_neg" { + name = "appengine-neg" + network_endpoint_type = "SERVERLESS" + region = "us-central1" + app_engine { + } +} +```
Open in Cloud Shell diff --git a/website/docs/r/compute_region_target_https_proxy.html.markdown b/website/docs/r/compute_region_target_https_proxy.html.markdown index fa758a2b734..9d766e96903 100644 --- a/website/docs/r/compute_region_target_https_proxy.html.markdown +++ b/website/docs/r/compute_region_target_https_proxy.html.markdown @@ -94,6 +94,44 @@ resource "google_compute_region_health_check" "default" { } } ``` + +## Example Usage - Region Target Https Proxy Certificate Manager Certificate + + +```hcl +resource "google_compute_region_target_https_proxy" "default" { + name = "target-http-proxy" + url_map = google_compute_region_url_map.default.id + certificate_manager_certificates = ["//certificatemanager.googleapis.com/${google_certificate_manager_certificate.default.id}"] # [google_certificate_manager_certificate.default.id] is also acceptable +} + +resource "google_certificate_manager_certificate" "default" { + name = "my-certificate" + location = "us-central1" + self_managed { + pem_certificate = file("test-fixtures/cert.pem") + pem_private_key = file("test-fixtures/private-key.pem") + } +} + +resource "google_compute_region_url_map" "default" { + name = "url-map" + default_service = google_compute_region_backend_service.default.id + region = "us-central1" +} + +resource "google_compute_region_backend_service" "default" { + name = "backend-service" + region = "us-central1" + protocol = "HTTPS" + timeout_sec = 30 + load_balancing_scheme = "INTERNAL_MANAGED" +} +``` ## Argument Reference @@ -110,12 +148,6 @@ The following arguments are supported: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. -* `ssl_certificates` - - (Required) - A list of RegionSslCertificate resources that are used to authenticate - connections between users and the load balancer. Currently, exactly - one SSL certificate must be specified. 
- * `url_map` - (Required) A reference to the RegionUrlMap resource that defines the mapping from URL @@ -129,6 +161,19 @@ The following arguments are supported: (Optional) An optional description of this resource. +* `certificate_manager_certificates` - + (Optional) + URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. + Currently, you may specify up to 15 certificates. Certificate manager certificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + sslCertificates and certificateManagerCertificates fields can not be defined together. + Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}` or just the self_link `projects/{project}/locations/{location}/certificates/{resourceName}` + +* `ssl_certificates` - + (Optional) + URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. + At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. + sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. 
+ * `ssl_policy` - (Optional) A reference to the Region SslPolicy resource that will be associated with diff --git a/website/docs/r/compute_service_attachment.html.markdown b/website/docs/r/compute_service_attachment.html.markdown index 22973fef45b..a8f268c167e 100644 --- a/website/docs/r/compute_service_attachment.html.markdown +++ b/website/docs/r/compute_service_attachment.html.markdown @@ -204,6 +204,113 @@ resource "google_compute_subnetwork" "psc_ilb_producer_subnetwork" { ip_cidr_range = "10.0.0.0/16" } +resource "google_compute_subnetwork" "psc_ilb_nat" { + name = "psc-ilb-nat" + region = "us-west2" + + network = google_compute_network.psc_ilb_network.id + purpose = "PRIVATE_SERVICE_CONNECT" + ip_cidr_range = "10.1.0.0/16" +} +``` + +## Example Usage - Service Attachment Explicit Networks + + +```hcl +resource "google_compute_service_attachment" "psc_ilb_service_attachment" { + name = "my-psc-ilb" + region = "us-west2" + description = "A service attachment configured with Terraform" + + enable_proxy_protocol = false + + connection_preference = "ACCEPT_MANUAL" + nat_subnets = [google_compute_subnetwork.psc_ilb_nat.id] + target_service = google_compute_forwarding_rule.psc_ilb_target_service.id + + consumer_accept_lists { + network_url = google_compute_network.psc_ilb_consumer_network.self_link + connection_limit = 1 + } +} + +resource "google_compute_network" "psc_ilb_consumer_network" { + name = "psc-ilb-consumer-network" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc_ilb_consumer_subnetwork" { + name = "psc-ilb-consumer-network" + ip_cidr_range = "10.0.0.0/16" + region = "us-west2" + network = google_compute_network.psc_ilb_consumer_network.id +} + +resource "google_compute_address" "psc_ilb_consumer_address" { + name = "psc-ilb-consumer-address" + region = "us-west2" + + subnetwork = google_compute_subnetwork.psc_ilb_consumer_subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_forwarding_rule" 
"psc_ilb_consumer" { + name = "psc-ilb-consumer-forwarding-rule" + region = "us-west2" + + target = google_compute_service_attachment.psc_ilb_service_attachment.id + load_balancing_scheme = "" # need to override EXTERNAL default when target is a service attachment + network = google_compute_network.psc_ilb_consumer_network.id + subnetwork = google_compute_subnetwork.psc_ilb_consumer_subnetwork.id + ip_address = google_compute_address.psc_ilb_consumer_address.id +} + +resource "google_compute_forwarding_rule" "psc_ilb_target_service" { + name = "producer-forwarding-rule" + region = "us-west2" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.producer_service_backend.id + all_ports = true + network = google_compute_network.psc_ilb_network.name + subnetwork = google_compute_subnetwork.psc_ilb_producer_subnetwork.name +} + +resource "google_compute_region_backend_service" "producer_service_backend" { + name = "producer-service" + region = "us-west2" + + health_checks = [google_compute_health_check.producer_service_health_check.id] +} + +resource "google_compute_health_check" "producer_service_health_check" { + name = "producer-service-health-check" + + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "80" + } +} + +resource "google_compute_network" "psc_ilb_network" { + name = "psc-ilb-network" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc_ilb_producer_subnetwork" { + name = "psc-ilb-producer-subnetwork" + region = "us-west2" + + network = google_compute_network.psc_ilb_network.id + ip_cidr_range = "10.0.0.0/16" +} + resource "google_compute_subnetwork" "psc_ilb_nat" { name = "psc-ilb-nat" region = "us-west2" @@ -371,8 +478,14 @@ The following arguments are supported: The `consumer_accept_lists` block supports: * `project_id_or_num` - - (Required) + (Optional) A project that is allowed to connect to this service attachment. 
+ Only one of project_id_or_num and network_url may be set. + +* `network_url` - + (Optional) + The network that is allowed to connect to this service attachment. + Only one of project_id_or_num and network_url may be set. * `connection_limit` - (Required) diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index 3c72beba2d5..983dd05befe 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -790,8 +790,7 @@ The `master_authorized_networks_config.cidr_blocks` block supports: * `disk_type` - (Optional) Type of the disk attached to each node (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-standard' -* `enable_confidential_storage` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) -Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default. +* `enable_confidential_storage` - (Optional) Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default. * `ephemeral_storage_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is [documented below](#nested_ephemeral_storage_config). diff --git a/website/docs/r/dataflow_flex_template_job.html.markdown b/website/docs/r/dataflow_flex_template_job.html.markdown index d592b3fdb39..992c142e8e5 100644 --- a/website/docs/r/dataflow_flex_template_job.html.markdown +++ b/website/docs/r/dataflow_flex_template_job.html.markdown @@ -118,6 +118,10 @@ provided, the provider project is used. * `region` - (Optional) The region in which the created job should run. +* `service_account_email` - (Optional) Service account email to run the workers as. 
+ +* `subnetwork` - (Optional) Compute Engine subnetwork for launching instances to run your pipeline. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/firebase_app_check_device_check_config.html.markdown b/website/docs/r/firebase_app_check_device_check_config.html.markdown new file mode 100644 index 00000000000..5825fd2a099 --- /dev/null +++ b/website/docs/r/firebase_app_check_device_check_config.html.markdown @@ -0,0 +1,161 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Firebase App Check" +description: |- + An app's DeviceCheck configuration object. +--- + +# google\_firebase\_app\_check\_device\_check\_config + +An app's DeviceCheck configuration object. Note that the Team ID registered with your +app is used as part of the validation process. Make sure your `google_firebase_apple_app` has a team_id present. + + +To get more information about DeviceCheckConfig, see: + +* [API documentation](https://firebase.google.com/docs/reference/appcheck/rest/v1/projects.apps.deviceCheckConfig) +* How-to Guides + * [Official Documentation](https://firebase.google.com/docs/app-check) + +~> **Warning:** All arguments including the following potentially sensitive +values will be stored in the raw state as plain text: `private_key`. +[Read more about sensitive data in state](https://www.terraform.io/language/state/sensitive-data). 
+ +## Example Usage - Firebase App Check Device Check Config Full + + +```hcl +resource "google_firebase_apple_app" "default" { + provider = google-beta + + project = "my-project-name" + display_name = "Apple app" + bundle_id = "bundle.id.devicecheck" + team_id = "9987654321" +} + +# It takes a while for App Check to recognize the new app +# If your app already exists, you don't have to wait 30 seconds. +resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_apple_app.default] + create_duration = "30s" +} + +resource "google_firebase_app_check_device_check_config" "default" { + provider = google-beta + + project = "my-project-name" + app_id = google_firebase_apple_app.default.app_id + token_ttl = "7200s" + key_id = "Key ID" + private_key = file("path/to/private-key.p8") + + depends_on = [time_sleep.wait_30s] + + lifecycle { + precondition { + condition = google_firebase_apple_app.default.team_id != "" + error_message = "Provide a Team ID on the Apple App to use App Check" + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `key_id` - + (Required) + The key identifier of a private key enabled with DeviceCheck, created in your Apple Developer account. + +* `private_key` - + (Required) + The contents of the private key (.p8) file associated with the key specified by keyId. + **Note**: This property is sensitive and will not be displayed in the plan. + +* `app_id` - + (Required) + The ID of an + [Apple App](https://firebase.google.com/docs/reference/firebase-management/rest/v1beta1/projects.iosApps#IosApp.FIELDS.app_id). + + +- - - + + +* `token_ttl` - + (Optional) + Specifies the duration for which App Check tokens exchanged from DeviceCheck artifacts will be valid. + If unset, a default value of 1 hour is assumed. Must be between 30 minutes and 7 days, inclusive. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
+ +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/apps/{{app_id}}/deviceCheckConfig` + +* `name` - + The relative resource name of the DeviceCheck configuration object + +* `private_key_set` - + Whether the privateKey field was previously set. Since App Check will never return the + privateKey field, this field is the only way to find out whether it was previously set. + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +DeviceCheckConfig can be imported using any of these accepted formats: + +* `projects/{{project}}/apps/{{app_id}}/deviceCheckConfig` +* `{{project}}/{{app_id}}` +* `{{app_id}}` + + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceCheckConfig using one of the formats above. For example: + +```tf +import { + id = "projects/{{project}}/apps/{{app_id}}/deviceCheckConfig" + to = google_firebase_app_check_device_check_config.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), DeviceCheckConfig can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_firebase_app_check_device_check_config.default projects/{{project}}/apps/{{app_id}}/deviceCheckConfig +$ terraform import google_firebase_app_check_device_check_config.default {{project}}/{{app_id}} +$ terraform import google_firebase_app_check_device_check_config.default {{app_id}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/firestore_database.html.markdown b/website/docs/r/firestore_database.html.markdown index bb484e15edc..380e3f4c6dc 100644 --- a/website/docs/r/firestore_database.html.markdown +++ b/website/docs/r/firestore_database.html.markdown @@ -61,6 +61,61 @@ resource "google_firestore_database" "database" { deletion_policy = "DELETE" } ``` +## Example Usage - Firestore Cmek Database + + +```hcl +data "google_project" "project" { + provider = google-beta +} + +resource "google_firestore_database" "database" { + provider = google-beta + + project = "my-project-name" + name = "cmek-database-id" + location_id = "nam5" + type = "FIRESTORE_NATIVE" + concurrency_mode = "OPTIMISTIC" + app_engine_integration_mode = "DISABLED" + point_in_time_recovery_enablement = "POINT_IN_TIME_RECOVERY_ENABLED" + delete_protection_state = "DELETE_PROTECTION_ENABLED" + deletion_policy = "DELETE" + cmek_config { + kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [ + google_kms_crypto_key_iam_binding.firestore_cmek_keyuser + ] +} + +resource "google_kms_crypto_key" "crypto_key" { + provider = google-beta + + name = "kms-key" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + provider = google-beta + + name = "kms-key-ring" + location = "us" +} + +resource "google_kms_crypto_key_iam_binding" "firestore_cmek_keyuser" { + provider = google-beta + + 
crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-firestore.iam.gserviceaccount.com", + ] +} +``` ## Example Usage - Firestore Default Database In Datastore Mode @@ -88,6 +143,61 @@ resource "google_firestore_database" "datastore_mode_database" { deletion_policy = "DELETE" } ``` +## Example Usage - Firestore Cmek Database In Datastore Mode + + +```hcl +data "google_project" "project" { + provider = google-beta +} + +resource "google_firestore_database" "database" { + provider = google-beta + + project = "my-project-name" + name = "cmek-database-id" + location_id = "nam5" + type = "DATASTORE_MODE" + concurrency_mode = "OPTIMISTIC" + app_engine_integration_mode = "DISABLED" + point_in_time_recovery_enablement = "POINT_IN_TIME_RECOVERY_ENABLED" + delete_protection_state = "DELETE_PROTECTION_ENABLED" + deletion_policy = "DELETE" + cmek_config { + kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [ + google_kms_crypto_key_iam_binding.firestore_cmek_keyuser + ] +} + +resource "google_kms_crypto_key" "crypto_key" { + provider = google-beta + + name = "kms-key" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + provider = google-beta + + name = "kms-key-ring" + location = "us" +} + +resource "google_kms_crypto_key_iam_binding" "firestore_cmek_keyuser" { + provider = google-beta + + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-firestore.iam.gserviceaccount.com", + ] +} +``` ## Argument Reference @@ -147,6 +257,13 @@ The following arguments are supported: **Note:** Additionally, to delete this database using `terraform destroy`, `deletion_policy` must be set to `DELETE`. 
Possible values are: `DELETE_PROTECTION_STATE_UNSPECIFIED`, `DELETE_PROTECTION_ENABLED`, `DELETE_PROTECTION_DISABLED`. +* `cmek_config` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + The CMEK (Customer Managed Encryption Key) configuration for a Firestore + database. If not present, the database is secured by the default Google + encryption key. + Structure is [documented below](#nested_cmek_config). + * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. @@ -157,6 +274,30 @@ The default value is `ABANDON`. See also `delete_protection`. +The `cmek_config` block supports: + +* `kms_key_name` - + (Required) + The resource ID of a Cloud KMS key. If set, the database created will + be a Customer-managed Encryption Key (CMEK) database encrypted with + this key. This feature is allowlist only in initial launch. + Only keys in the same location as this database are allowed to be used + for encryption. For Firestore's nam5 multi-region, this corresponds to Cloud KMS + multi-region us. For Firestore's eur3 multi-region, this corresponds to + Cloud KMS multi-region europe. See https://cloud.google.com/kms/docs/locations. + This value should be the KMS key resource ID in the format of + `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`. + How to retrieve this resource ID is listed at + https://cloud.google.com/kms/docs/getting-resource-ids#getting_the_id_for_a_key_and_version. + +* `active_key_version` - + (Output) + Currently in-use KMS key versions (https://cloud.google.com/kms/docs/resource-hierarchy#key_versions). + During key rotation (https://cloud.google.com/kms/docs/key-rotation), there can be + multiple in-use key versions. + The expected format is + `projects/{project_id}/locations/{kms_location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{key_version}`. 
+ ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/gke_hub_feature_membership.html.markdown b/website/docs/r/gke_hub_feature_membership.html.markdown index ed571b6d7bd..bd79aaa35da 100644 --- a/website/docs/r/gke_hub_feature_membership.html.markdown +++ b/website/docs/r/gke_hub_feature_membership.html.markdown @@ -504,6 +504,10 @@ The following arguments are supported: (Optional) The maximum number of audit violations to be stored in a constraint. If not set, the default of 20 will be used. + * `deployment_configs` - + (Optional) + Map of deployment configs to deployments ("admission", "audit", "mutation"). + * `policy_content` - (Optional) Specifies the desired policy content on the cluster. Structure is [documented below](#nested_policy_content). @@ -514,12 +518,97 @@ The following arguments are supported: (Optional) Specifies the list of backends Policy Controller will export to. Must be one of `CLOUD_MONITORING` or `PROMETHEUS`. Defaults to [`CLOUD_MONITORING`, `PROMETHEUS`]. Specifying an empty value `[]` disables metrics export. +The `deployment_configs` block supports: + +* `component_name` - + (Required) + The name of the component. One of `admission`, `audit`, or `mutation` + +* `container_resources` - + (Optional) + Container resource requirements. + +* `pod_affinity` - + (Optional) + Pod affinity configuration. Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY + +* `pod_tolerations` - + (Optional) + Pod tolerations of node taints. + +* `replica_count` - + (Optional) + Pod replica count. + +The `container_resources` block supports: + +* `limits` - + (Optional) + Limits describes the maximum amount of compute resources allowed for use by the running container. + +* `requests` - + (Optional) + Requests describes the amount of compute resources reserved for the container by the kube-scheduler. 
+ +The `limits` block supports: + +* `cpu` - + (Optional) + CPU requirement expressed in Kubernetes resource units. + +* `memory` - + (Optional) + Memory requirement expressed in Kubernetes resource units. + +The `requests` block supports: + +* `cpu` - + (Optional) + CPU requirement expressed in Kubernetes resource units. + +* `memory` - + (Optional) + Memory requirement expressed in Kubernetes resource units. + +The `pod_tolerations` block supports: + +* `effect` - + (Optional) + Matches a taint effect. + +* `key` - + (Optional) + Matches a taint key (not necessarily unique). + +* `operator` - + (Optional) + Matches a taint operator. + +* `value` - + (Optional) + Matches a taint value. + The `policy_content` block supports: +* `bundles` - + (Optional) + map of bundle name to BundleInstallSpec. The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint. + * `template_library` (Optional) Configures the installation of the Template Library. Structure is [documented below](#nested_template_library). + +The `bundles` block supports: + +* `bundle_name` - + (Required) + The name of the bundle. + +* `exempted_namespaces` - + (Optional) + The set of namespaces to be exempted from the bundle. + The `template_library` block supports: * `installation` diff --git a/website/docs/r/gke_hub_scope.html.markdown b/website/docs/r/gke_hub_scope.html.markdown index 18ebee7af49..b97d103753a 100644 --- a/website/docs/r/gke_hub_scope.html.markdown +++ b/website/docs/r/gke_hub_scope.html.markdown @@ -34,6 +34,11 @@ To get more information about Scope, see: ```hcl resource "google_gke_hub_scope" "scope" { scope_id = "my-scope" + namespace_labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } labels = { keyb = "valueb" keya = "valuea" @@ -55,6 +60,14 @@ The following arguments are supported: - - - +* `namespace_labels` - + (Optional) + Scope-level cluster namespace labels. 
For the member clusters bound + to the Scope, these labels are applied to each namespace under the + Scope. Scope-level labels take precedence over Namespace-level + labels (`namespace_labels` in the Fleet Namespace resource) if they + share a key. Keys and values must be Kubernetes-conformant. + * `labels` - (Optional) Labels for this Scope. diff --git a/website/docs/r/gkeonprem_vmware_cluster.html.markdown b/website/docs/r/gkeonprem_vmware_cluster.html.markdown index d9a9cb52245..04228480346 100644 --- a/website/docs/r/gkeonprem_vmware_cluster.html.markdown +++ b/website/docs/r/gkeonprem_vmware_cluster.html.markdown @@ -95,6 +95,7 @@ resource "google_gkeonprem_vmware_cluster" "cluster-f5lb" { gateway="test-gateway" } } + vcenter_network = "test-vcenter-network" } control_plane_node { cpus = 4 @@ -427,7 +428,7 @@ The following arguments are supported: Structure is [documented below](#nested_dhcp_ip_config). * `vcenter_network` - - (Output) + (Optional) vcenter_network specifies vCenter network name. Inherited from the admin cluster. 
* `host_config` - diff --git a/website/docs/r/google_project_iam.html.markdown b/website/docs/r/google_project_iam.html.markdown index 419315c503d..33ee748e341 100644 --- a/website/docs/r/google_project_iam.html.markdown +++ b/website/docs/r/google_project_iam.html.markdown @@ -228,7 +228,7 @@ An [`import` block](https://developer.hashicorp.com/terraform/language/import) ( ```tf import { - id = ""{{project_id}} roles/viewer user:foo@example.com"m" + id = "{{project_id}} roles/viewer user:foo@example.com" to = google_project_iam_member.default } ``` diff --git a/website/docs/r/kms_ekm_connection.html.markdown b/website/docs/r/kms_ekm_connection.html.markdown new file mode 100644 index 00000000000..fb945ee2f12 --- /dev/null +++ b/website/docs/r/kms_ekm_connection.html.markdown @@ -0,0 +1,203 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud Key Management Service" +description: |- + `Ekm Connections` are used to control the connection settings for an `EXTERNAL_VPC` CryptoKey. +--- + +# google\_kms\_ekm\_connection + +`Ekm Connections` are used to control the connection settings for an `EXTERNAL_VPC` CryptoKey. +It is used to connect customer's external key manager to Google Cloud EKM. + + +~> **Note:** Ekm Connections cannot be deleted from Google Cloud Platform. 
+ + +To get more information about EkmConnection, see: + +* [API documentation](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.ekmConnections) +* How-to Guides + * [Creating an Ekm Connection](https://cloud.google.com/kms/docs/create-ekm-connection) + +## Example Usage - Kms Ekm Connection Basic + + +```hcl +resource "google_kms_ekm_connection" "example-ekmconnection" { + name = "ekmconnection_example" + location = "us-central1" + key_management_mode = "MANUAL" + service_resolvers { + service_directory_service = "projects/project_id/locations/us-central1/namespaces/namespace_name/services/service_name" + hostname = "example-ekm.goog" + server_certificates { + raw_der = "==HAwIBCCAr6gAwIBAgIUWR+EV4lqiV7Ql12VY==" + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `name` - + (Required) + The resource name for the EkmConnection. + +* `service_resolvers` - + (Required) + A list of ServiceResolvers where the EKM can be reached. There should be one ServiceResolver per EKM replica. Currently, only a single ServiceResolver is supported + Structure is [documented below](#nested_service_resolvers). + +* `location` - + (Required) + The location for the EkmConnection. + A full list of valid locations can be found by running `gcloud kms locations list`. + + +The `service_resolvers` block supports: + +* `service_directory_service` - + (Required) + Required. The resource name of the Service Directory service pointing to an EKM replica, in the format projects/*/locations/*/namespaces/*/services/* + +* `hostname` - + (Required) + Required. The hostname of the EKM replica used at TLS and HTTP layers. + +* `server_certificates` - + (Required) + Required. A list of leaf server certificates used to authenticate HTTPS connections to the EKM replica. Currently, a maximum of 10 Certificates is supported. + Structure is [documented below](#nested_server_certificates). + +* `endpoint_filter` - + (Optional) + Optional. 
The filter applied to the endpoints of the resolved service. If no filter is specified, all endpoints will be considered. An endpoint will be chosen arbitrarily from the filtered list for each request. For endpoint filter syntax and examples, see https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest. + + +The `server_certificates` block supports: + +* `raw_der` - + (Required) + Required. The raw certificate bytes in DER format. A base64-encoded string. + +* `parsed` - + (Output) + Output only. True if the certificate was parsed successfully. + +* `issuer` - + (Output) + Output only. The issuer distinguished name in RFC 2253 format. Only present if parsed is true. + +* `subject` - + (Output) + Output only. The subject distinguished name in RFC 2253 format. Only present if parsed is true. + +* `not_before_time` - + (Output) + Output only. The certificate is not valid before this time. Only present if parsed is true. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `not_after_time` - + (Output) + Output only. The certificate is not valid after this time. Only present if parsed is true. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `sha256_fingerprint` - + (Output) + Output only. The SHA-256 certificate fingerprint as a hex string. Only present if parsed is true. + +* `serial_number` - + (Output) + Output only. The certificate serial number as a hex string. Only present if parsed is true. + +* `subject_alternative_dns_names` - + (Output) + Output only. The subject Alternative DNS names. Only present if parsed is true. + +- - - + + +* `key_management_mode` - + (Optional) + Optional. Describes who can perform control plane operations on the EKM. 
If unset, this defaults to MANUAL + Default value is `MANUAL`. + Possible values are: `MANUAL`, `CLOUD_KMS`. + +* `etag` - + (Optional) + Optional. Etag of the currently stored EkmConnection. + +* `crypto_space_path` - + (Optional) + Optional. Identifies the EKM Crypto Space that this EkmConnection maps to. Note: This field is required if KeyManagementMode is CLOUD_KMS. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}` + +* `create_time` - + Output only. The time at which the EkmConnection was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +EkmConnection can be imported using any of these accepted formats: + +* `projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EkmConnection using one of the formats above. 
For example: + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}" + to = google_kms_ekm_connection.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), EkmConnection can be imported using one of the formats above. For example: + +``` +$ terraform import google_kms_ekm_connection.default projects/{{project}}/locations/{{location}}/ekmConnections/{{name}} +$ terraform import google_kms_ekm_connection.default {{project}}/{{location}}/{{name}} +$ terraform import google_kms_ekm_connection.default {{location}}/{{name}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/securityposture_posture.html.markdown b/website/docs/r/securityposture_posture.html.markdown index 50ac2e4d2b9..fc17c038d73 100644 --- a/website/docs/r/securityposture_posture.html.markdown +++ b/website/docs/r/securityposture_posture.html.markdown @@ -36,7 +36,7 @@ To get more information about Posture, see: ```hcl resource "google_securityposture_posture" "posture1"{ - posture_id = "posture_1" + posture_id = "posture_example" parent = "organizations/123456789" location = "global" state = "ACTIVE" diff --git a/website/docs/r/securityposture_posture_deployment.html.markdown b/website/docs/r/securityposture_posture_deployment.html.markdown index c4de68a6ca1..6e05e66369b 100644 --- a/website/docs/r/securityposture_posture_deployment.html.markdown +++ b/website/docs/r/securityposture_posture_deployment.html.markdown @@ -35,7 +35,7 @@ To get more information about PostureDeployment, see: ```hcl -resource "google_securityposture_posture" "posture1" { +resource "google_securityposture_posture" "posture_1" { posture_id = "posture_1" parent = "organizations/123456789" location = "global" @@ -64,8 +64,8 @@ resource 
"google_securityposture_posture_deployment" "postureDeployment" { location = "global" description = "a new posture deployment" target_resource = "projects/1111111111111" - posture_id = google_securityposture_posture.posture1.name - posture_revision_id = google_securityposture_posture.posture1.revision_id + posture_id = google_securityposture_posture.posture_1.name + posture_revision_id = google_securityposture_posture.posture_1.revision_id } ``` diff --git a/website/docs/r/spanner_database_iam.html.markdown b/website/docs/r/spanner_database_iam.html.markdown index 1de101713eb..ef09772c1fd 100644 --- a/website/docs/r/spanner_database_iam.html.markdown +++ b/website/docs/r/spanner_database_iam.html.markdown @@ -39,6 +39,32 @@ resource "google_spanner_database_iam_policy" "database" { } ``` +With IAM Conditions: + +```hcl +data "google_iam_policy" "admin" { + binding { + role = "roles/editor" + + members = [ + "user:jane@example.com", + ] + + condition { + title = "My Role" + description = "Grant permissions on my_role" + expression = "(resource.type == \"spanner.googleapis.com/DatabaseRole\" && (resource.name.endsWith(\"/myrole\")))" + } + } +} + +resource "google_spanner_database_iam_policy" "database" { + instance = "your-instance-name" + database = "your-database-name" + policy_data = data.google_iam_policy.admin.policy_data +} +``` + ## google\_spanner\_database\_iam\_binding ```hcl @@ -53,6 +79,26 @@ resource "google_spanner_database_iam_binding" "database" { } ``` +With IAM Conditions: + +```hcl +resource "google_spanner_database_iam_binding" "database" { + instance = "your-instance-name" + database = "your-database-name" + role = "roles/compute.networkUser" + + members = [ + "user:jane@example.com", + ] + + condition { + title = "My Role" + description = "Grant permissions on my_role" + expression = "(resource.type == \"spanner.googleapis.com/DatabaseRole\" && (resource.name.endsWith(\"/myrole\")))" + } +} +``` + ## google\_spanner\_database\_iam\_member 
```hcl @@ -64,6 +110,23 @@ resource "google_spanner_database_iam_member" "database" { } ``` +With IAM Conditions: + +```hcl +resource "google_spanner_database_iam_member" "database" { + instance = "your-instance-name" + database = "your-database-name" + role = "roles/compute.networkUser" + member = "user:jane@example.com" + + condition { + title = "My Role" + description = "Grant permissions on my_role" + expression = "(resource.type == \"spanner.googleapis.com/DatabaseRole\" && (resource.name.endsWith(\"/myrole\")))" + } +} +``` + ## Argument Reference The following arguments are supported: @@ -91,6 +154,23 @@ The following arguments are supported: * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. +* `condition` - (Optional) An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding. + Structure is [documented below](#nested_condition). + +--- + +The `condition` block supports: + +* `expression` - (Required) Textual representation of an expression in Common Expression Language syntax. + +* `title` - (Required) A title for the expression, i.e. a short string describing its purpose. + +* `description` - (Optional) An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + +~> **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the +identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will +consider it to be an entirely different resource and will treat it as such. 
+ ## Attributes Reference In addition to the arguments listed above, the following computed attributes are diff --git a/website/docs/r/workstations_workstation_config.html.markdown b/website/docs/r/workstations_workstation_config.html.markdown index 833f73a5fe7..0cb74ccda21 100644 --- a/website/docs/r/workstations_workstation_config.html.markdown +++ b/website/docs/r/workstations_workstation_config.html.markdown @@ -561,6 +561,11 @@ The following arguments are supported: Directories to persist across workstation sessions. Structure is [documented below](#nested_persistent_directories). +* `ephemeral_directories` - + (Optional) + Ephemeral directories which won't persist across workstation sessions. + Structure is [documented below](#nested_ephemeral_directories). + * `container` - (Optional) Container that will be run for each workstation using this configuration when that workstation is started. @@ -716,6 +721,41 @@ The following arguments are supported: (Optional) Name of the snapshot to use as the source for the disk. This can be the snapshot's `self_link`, `id`, or a string in the format of `projects/{project}/global/snapshots/{snapshot}`. If set, `sizeGb` and `fsType` must be empty. Can only be updated if it has an existing value. +The `ephemeral_directories` block supports: + +* `mount_path` - + (Optional) + Location of this directory in the running workstation. + +* `gce_pd` - + (Optional) + An EphemeralDirectory backed by a Compute Engine persistent disk. + Structure is [documented below](#nested_gce_pd). + + +The `gce_pd` block supports: + +* `disk_type` - + (Optional) + Type of the disk to use. Defaults to `"pd-standard"`. + +* `source_snapshot` - + (Optional) + Name of the snapshot to use as the source for the disk. + Must be empty if `sourceImage` is set. + Must be empty if `read_only` is false. + Updating `source_snapshot` will update content in the ephemeral directory after the workstation is restarted. 
+ +* `source_image` - + (Optional) + Name of the disk image to use as the source for the disk. + Must be empty if `sourceSnapshot` is set. + Updating `sourceImage` will update content in the ephemeral directory after the workstation is restarted. + +* `read_only` - + (Optional) + Whether the disk is read only. If true, the disk may be shared by multiple VMs and `sourceSnapshot` must be set. + The `container` block supports: * `image` -