diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 3fb7c963acf1..1a6ad62d42cf 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -651,7 +651,7 @@ func ResourceComputeInstance() *schema.Resource { }, }, - "params": { + "params": { Type: schema.TypeList, MaxItems: 1, Optional: true, @@ -673,7 +673,7 @@ func ResourceComputeInstance() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Description: `A set of key/value label pairs assigned to the instance. - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1422,7 +1422,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error creating instance while setting the security policies: %s", err) } <% end -%> - + err = waitUntilInstanceHasDesiredStatus(config, d) if err != nil { return fmt.Errorf("Error waiting for status: %s", err) @@ -1936,7 +1936,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) } - <% unless version == 'ga' -%> + <% unless version == 'ga' -%> updateSecurityPolicy := false for i := 0; i < len(instance.NetworkInterfaces); i++ { prefix := fmt.Sprintf("network_interface.%d", i) diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index af3492f29413..8d53ce85b51d 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -675,6 +675,11 @@ func schemaNodeConfig() *schema.Schema { }, }, }, + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. 
The field is ignored (both PUT & PATCH) when empty.`, + }, <% unless version == 'ga' -%> "enable_confidential_storage": { Type: schema.TypeBool, @@ -884,6 +889,10 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { nc.ResourceLabels = m } + if v, ok := nodeConfig["resource_manager_tags"]; ok && len(v.(map[string]interface{})) > 0 { + nc.ResourceManagerTags = expandResourceManagerTags(v) + } + if v, ok := nodeConfig["tags"]; ok { tagsList := v.([]interface{}) tags := []string{} @@ -974,7 +983,7 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { nc.EnableConfidentialStorage = v.(bool) } <% end -%> - + <% unless version == "ga" -%> if v, ok := nodeConfig["host_maintenance_policy"]; ok { nc.HostMaintenancePolicy = expandHostMaintenancePolicy(v) @@ -988,6 +997,19 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { return nc } +func expandResourceManagerTags(v interface{}) *container.ResourceManagerTags { + rmts := make(map[string]string) + + if v != nil { + rmts = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + + return &container.ResourceManagerTags{ + Tags: rmts, + ForceSendFields: []string{"Tags"}, + } +} + func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig { if v == nil { return nil @@ -1213,7 +1235,8 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte "advanced_machine_features": flattenAdvancedMachineFeaturesConfig(c.AdvancedMachineFeatures), "sole_tenant_config": flattenSoleTenantConfig(c.SoleTenantConfig), "fast_socket": flattenFastSocket(c.FastSocket), - <% unless version == 'ga' -%> + "resource_manager_tags": flattenResourceManagerTags(c.ResourceManagerTags), + <% unless version == 'ga' -%> "enable_confidential_storage": c.EnableConfidentialStorage, <% end -%> }) @@ -1225,6 +1248,19 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte return config } +func flattenResourceManagerTags(c *container.ResourceManagerTags) map[string]interface{} { + rmt := make(map[string]interface{}) + + if c != nil { + for k, v := range c.Tags { + rmt[k] = v + } + + } + + return rmt +} + func flattenAdvancedMachineFeaturesConfig(c *container.AdvancedMachineFeatures) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 0ab9a425f653..00a3f3aaab84 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -97,6 +97,7 @@ var ( forceNewClusterNodeConfigFields = []string{ "labels", "workload_metadata_config", + "resource_manager_tags", } suppressDiffForAutopilot = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool { @@ -1424,11 +1425,11 @@ func ResourceContainerCluster() *schema.Resource { "node_pool_defaults": clusterSchemaNodePoolDefaults(), "node_pool_auto_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled 
clusters.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "network_tags": { @@ -5266,7 +5267,7 @@ func expandFleet(configured interface{}) *container.Fleet { return &container.Fleet{ Project: config["project"].(string), } -} +} func expandEnableK8sBetaApis(configured interface{}, enabledAPIs []string) *container.K8sBetaAPIConfig { l := configured.([]interface{}) @@ -5424,6 +5425,7 @@ func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoCon if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 { npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v) } + return npac } @@ -6129,7 +6131,7 @@ func flattenFleet(c *container.Fleet) []map[string]interface{} { membership_id = match[4] membership_location = match[3] } - + return []map[string]interface{}{ { "project": c.Project, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index c57254c7e06d..a542384379f6 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -56,6 +56,43 @@ func TestAccContainerCluster_basic(t *testing.T) { }) } +func TestAccContainerCluster_resourceManagerTags(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "self_link"), + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + func TestAccContainerCluster_networkingModeRoutes(t *testing.T) { t.Parallel() @@ -2843,9 +2880,9 @@ func TestAccContainerCluster_withAutopilot(t *testing.T) { clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, false, ""), @@ -2856,7 +2893,7 @@ func TestAccContainerCluster_withAutopilot(t *testing.T) { { ResourceName: "google_container_cluster.with_autopilot", ImportState: true, - 
ImportStateVerify: true, + ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, }, @@ -2872,9 +2909,9 @@ func TestAccContainerClusterCustomServiceAccount_withAutopilot(t *testing.T) { serviceAccountName := fmt.Sprintf("tf-test-sa-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, false, serviceAccountName), @@ -2891,7 +2928,7 @@ func TestAccContainerClusterCustomServiceAccount_withAutopilot(t *testing.T) { { ResourceName: "google_container_cluster.with_autopilot", ImportState: true, - ImportStateVerify: true, + ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, }, @@ -2906,9 +2943,9 @@ func TestAccContainerCluster_errorAutopilotLocation(t *testing.T) { clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1-a", true, false, ""), @@ -2926,9 +2963,9 @@ func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) { clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, true, ""), @@ -4444,11 +4481,11 @@ func testAccContainerCluster_withIncompatibleMasterVersionNodeVersion(name strin resource "google_container_cluster" "gke_cluster" { name = "%s" location = "us-central1" - + min_master_version = "1.10.9-gke.5" node_version = "1.10.6-gke.11" initial_node_count = 1 - + } `, name) } @@ -6674,7 +6711,7 @@ resource "google_container_cluster" "with_autoprovisioning" { min_master_version = data.google_container_engine_versions.central1a.latest_master_version initial_node_count = 1 deletion_protection = false - + network = "%s" subnetwork = "%s" @@ -9250,7 +9287,7 @@ resource "google_compute_resource_policy" "policy" { resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" - + node_pool { name = "%s" initial_node_count = 2 @@ -9285,7 +9322,7 @@ func testAccContainerCluster_additional_pod_ranges_config(name string, nameCount } `, podRangeNamesStr) } - + return fmt.Sprintf(` resource "google_compute_network" "main" { name = "%s" @@ -9561,3 +9598,86 @@ func testAccContainerCluster_withWorkloadALTSConfig(projectID, name, networkName `, projectID, networkName, subnetworkName, 
name, enable) } <% end -%> + +func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_binding" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + ] +} + +resource "google_project_iam_binding" "tagUser" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com", + ] + + depends_on = [google_project_iam_binding.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_binding.tagHoldAdmin, + google_project_iam_binding.tagUser + ] +} + +resource "google_tags_tag_key" "key" { + parent = "projects/%[1]s" + short_name = "foobarbaz-%[2]s" + description = "For foo/bar resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value" { + parent = "tagKeys/${google_tags_tag_key.key.name}" + short_name = "foo-%[2]s" + description = "For foo resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index a4208ccfd799..025ed420fdde 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -1605,6 +1605,48 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node log.Printf("[INFO] Updated tags for node pool %s", name) } + if d.HasChange(prefix + "node_config.0.resource_manager_tags") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.resource_manager_tags"); ok { + req.ResourceManagerTags = expandResourceManagerTags(v) + } + + // sets resource manager tags to the empty list when user removes a previously defined list of tags entriely + // aka the node pool goes from having tags to no longer having any + if req.ResourceManagerTags == nil { + tags := make(map[string]string) + rmTags := &container.ResourceManagerTags{ + Tags: tags, + } + req.ResourceManagerTags = rmTags + } + + updateF := func() error { + clusterNodePoolsUpdateCall := 
config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool resource manager tags", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated resource manager tags for node pool %s", name) + } + if d.HasChange(prefix + "node_config.0.resource_labels") { req := &container.UpdateNodePoolRequest{ Name: name, diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index e06180e6c0e9..42939e9b7e9b 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -37,6 +37,61 @@ func TestAccContainerNodePool_basic(t *testing.T) { }) } +func TestAccContainerNodePool_resourceManagerTags(t *testing.T) { + t.Parallel() + pid := envvar.GetTestProjectFromEnv() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_node_pool.primary_nodes", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_node_pool.primary_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "cluster"}, + }, + { + Config: testAccContainerNodePool_resourceManagerTagsUpdate1(pid, clusterName, networkName, subnetworkName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_node_pool.primary_nodes", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_node_pool.primary_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "cluster"}, + }, + { + Config: testAccContainerNodePool_resourceManagerTagsUpdate2(pid, clusterName, networkName, subnetworkName, randomSuffix), + }, + { + ResourceName: "google_container_node_pool.primary_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "cluster"}, + }, + }, + }) +} + func TestAccContainerNodePool_basicWithClusterId(t *testing.T) { t.Parallel() @@ -4132,7 +4187,7 @@ resource "google_container_node_pool" "with_confidential_boot_disk" { name = "%s" location = "us-central1-a" cluster = 
google_container_cluster.cluster.name - + node_config { image_type = "COS_CONTAINERD" boot_disk_kms_key = "%s" @@ -4149,7 +4204,7 @@ resource "google_container_node_pool" "with_confidential_boot_disk" { } func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { - t.Parallel() + t.Parallel() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) @@ -4193,7 +4248,7 @@ resource "google_container_node_pool" "without_confidential_boot_disk" { name = "%s" location = "us-central1-a" cluster = google_container_cluster.cluster.name - + node_config { image_type = "COS_CONTAINERD" oauth_scopes = [ @@ -4208,3 +4263,348 @@ resource "google_container_node_pool" "without_confidential_boot_disk" { `, cluster, networkName, subnetworkName, np) } <% end -%> + +func testAccContainerNodePool_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_binding" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + ] +} + +resource "google_project_iam_binding" "tagUser" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com", + ] + + depends_on = [google_project_iam_binding.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_binding.tagHoldAdmin, + google_project_iam_binding.tagUser + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_tags_tag_key.key1] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} + +# Separately Managed Node Pool +resource "google_container_node_pool" "primary_nodes" { + name = google_container_cluster.primary.name + location = "us-central1-a" + cluster = google_container_cluster.primary.name + + version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key1.name}" = "tagValues/${google_tags_tag_value.value1.name}" + } + } +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerNodePool_resourceManagerTagsUpdate1(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_binding" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + ] +} + +resource "google_project_iam_binding" "tagUser" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com", + ] + + depends_on = [google_project_iam_binding.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_binding.tagHoldAdmin, + google_project_iam_binding.tagUser + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_tags_tag_key.key1] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} + +# Separately Managed Node Pool +resource "google_container_node_pool" "primary_nodes" { + name = google_container_cluster.primary.name + location = "us-central1-a" + cluster = google_container_cluster.primary.name + + version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key1.name}" = "tagValues/${google_tags_tag_value.value1.name}" + "tagKeys/${google_tags_tag_key.key2.name}" = "tagValues/${google_tags_tag_value.value2.name}" + } + } +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerNodePool_resourceManagerTagsUpdate2(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_binding" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + ] +} + +resource "google_project_iam_binding" "tagUser" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + members = [ + "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com", + "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com", + ] + + depends_on = [google_project_iam_binding.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_binding.tagHoldAdmin, + google_project_iam_binding.tagUser + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_tags_tag_key.key1] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} + +# Separately Managed Node Pool +resource "google_container_node_pool" "primary_nodes" { + name = google_container_cluster.primary.name + location = "us-central1-a" + cluster = google_container_cluster.primary.name + + version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + } +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 46fafb9df471..506516412fdc 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -906,6 +906,8 @@ gvnic { * `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls. +* `resource_manager_tags` - (Optional) A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. + * `taint` - (Optional) A list of [Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to apply to nodes. This field will only report drift on taint keys that are @@ -999,7 +1001,7 @@ sole_tenant_config { * `count` (Required) - The number of the guest accelerator cards exposed to this instance. * `gpu_driver_installation_config` (Optional) - Configuration for auto installation of GPU driver. Structure is [documented below](#nested_gpu_driver_installation_config). - + * `gpu_partition_size` (Optional) - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig [user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). * `gpu_sharing_config` (Optional) - Configuration for GPU sharing. Structure is [documented below](#nested_gpu_sharing_config). @@ -1346,7 +1348,7 @@ exported: * `node_config.0.effective_taints` - List of kubernetes taints applied to each node. Structure is [documented above](#nested_taint). -* `fleet.0.membership` - The resource name of the fleet Membership resource associated to this cluster with format `//gkehub.googleapis.com/projects/{{project}}/locations/{{location}}/memberships/{{name}}`. See the official doc for [fleet management](https://cloud.google.com/kubernetes-engine/docs/fleets-overview). +* `fleet.0.membership` - The resource name of the fleet Membership resource associated to this cluster with format `//gkehub.googleapis.com/projects/{{project}}/locations/{{location}}/memberships/{{name}}`. 
See the official doc for [fleet management](https://cloud.google.com/kubernetes-engine/docs/fleets-overview).

* `fleet.0.membership_id` - The short name of the fleet membership, extracted from `fleet.0.membership`. You can use this field to configure `membership_id` under [google_gkehub_feature_membership](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/gke_hub_feature_membership).
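
The heart of this change is the expand/flatten pair added in node_config.go.erb. The sketch below restates that round trip as a standalone program so the behaviour is easy to check in isolation; it assumes the GA `google.golang.org/api/container/v1` client (the package the generated provider refers to as `container`) and inlines the `tpgresource.ConvertStringMap` call as a plain loop. It illustrates the pattern only and is not a drop-in replacement for the generated code.

```go
package main

import (
	"fmt"

	container "google.golang.org/api/container/v1"
)

// expandResourceManagerTags mirrors the helper added in node_config.go.erb:
// the Terraform map becomes the API type, and ForceSendFields ensures the
// Tags key is serialized even when the map is empty.
func expandResourceManagerTags(v interface{}) *container.ResourceManagerTags {
	rmts := make(map[string]string)
	if v != nil {
		// Stands in for tpgresource.ConvertStringMap in the real code.
		for k, val := range v.(map[string]interface{}) {
			rmts[k] = val.(string)
		}
	}
	return &container.ResourceManagerTags{
		Tags:            rmts,
		ForceSendFields: []string{"Tags"},
	}
}

// flattenResourceManagerTags is the inverse, used when reading API state
// back into the Terraform schema map.
func flattenResourceManagerTags(c *container.ResourceManagerTags) map[string]interface{} {
	rmt := make(map[string]interface{})
	if c != nil {
		for k, v := range c.Tags {
			rmt[k] = v
		}
	}
	return rmt
}

func main() {
	in := map[string]interface{}{
		// Hypothetical IDs; real values use the formats documented in
		// container_cluster.html.markdown, e.g. tagKeys/{tag_key_id}.
		"tagKeys/123456789": "tagValues/987654321",
	}
	out := flattenResourceManagerTags(expandResourceManagerTags(in))
	fmt.Println(out) // map[tagKeys/123456789:tagValues/987654321]
}
```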
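
A related detail worth calling out is how the node pool update path clears tags. The condensed sketch below is again a standalone illustration under the same client assumption, with a hypothetical `buildResourceManagerTagsUpdate` helper standing in for the inline branch added to `nodePoolUpdate`: when the `resource_manager_tags` block is removed entirely, the request still carries a non-nil `ResourceManagerTags` with an explicit empty map, mirroring the diff's comment about the node pool going from having tags to having none.

```go
package main

import (
	"fmt"

	container "google.golang.org/api/container/v1"
)

// buildResourceManagerTagsUpdate condenses the branch added to nodePoolUpdate.
// configured and ok mimic the two return values of d.GetOk for
// "node_config.0.resource_manager_tags"; the node pool name is a placeholder.
func buildResourceManagerTagsUpdate(configured map[string]interface{}, ok bool) *container.UpdateNodePoolRequest {
	req := &container.UpdateNodePoolRequest{Name: "example-node-pool"}

	if ok && len(configured) > 0 {
		tags := make(map[string]string, len(configured))
		for k, v := range configured {
			tags[k] = v.(string)
		}
		req.ResourceManagerTags = &container.ResourceManagerTags{
			Tags:            tags,
			ForceSendFields: []string{"Tags"},
		}
		return req
	}

	// The block was removed: send an explicit empty map so the update call
	// clears previously applied tags instead of skipping the field.
	req.ResourceManagerTags = &container.ResourceManagerTags{Tags: map[string]string{}}
	return req
}

func main() {
	cleared := buildResourceManagerTagsUpdate(nil, false)
	fmt.Printf("tags after removal: %v\n", cleared.ResourceManagerTags.Tags) // map[]
}
```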