diff --git a/.changelog/12382.txt b/.changelog/12382.txt new file mode 100644 index 00000000000..73dde1a6b01 --- /dev/null +++ b/.changelog/12382.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +container: added `node_pool_autoconfig.linux_node_config.cgroup_mode` field to `google_container_cluster` resource +``` \ No newline at end of file diff --git a/google/services/container/node_config.go b/google/services/container/node_config.go index cca905b1f24..1fd947df654 100644 --- a/google/services/container/node_config.go +++ b/google/services/container/node_config.go @@ -787,6 +787,29 @@ func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema { } } +// Separate since this currently only supports a single value -- a subset of +// the overall LinuxNodeConfig +func schemaNodePoolAutoConfigLinuxNodeConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Linux node configuration options.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cgroup_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"CGROUP_MODE_UNSPECIFIED", "CGROUP_MODE_V1", "CGROUP_MODE_V2"}, false), + Description: `cgroupMode specifies the cgroup mode to be used on the node.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("CGROUP_MODE_UNSPECIFIED"), + }, + }, + }, + } +} + func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { configs := configured.([]interface{}) if len(configs) == 0 || configs[0] == nil { diff --git a/google/services/container/resource_container_cluster.go b/google/services/container/resource_container_cluster.go index b01b8049845..6723021558e 100644 --- a/google/services/container/resource_container_cluster.go +++ b/google/services/container/resource_container_cluster.go @@ -1363,6 +1363,7 @@ func ResourceContainerCluster() *schema.Resource { Elem: &schema.Resource{ Schema: 
map[string]*schema.Schema{ "node_kubelet_config": schemaNodePoolAutoConfigNodeKubeletConfig(), + "linux_node_config": schemaNodePoolAutoConfigLinuxNodeConfig(), "network_tags": { Type: schema.TypeList, Optional: true, @@ -2596,6 +2597,34 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } } + if linuxNodeConfig, ok := d.GetOk("node_pool_auto_config.0.linux_node_config"); ok { + name := containerClusterFullName(project, location, clusterName) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigLinuxNodeConfig: expandLinuxNodeConfig(linuxNodeConfig), + }, + } + + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err = clusterUpdateCall.Do() + return err + }, + }) + if err != nil { + return errwrap.Wrapf("Error updating LinuxNodeConfig: {{err}}", err) + } + + err = ContainerOperationWait(config, op, project, location, "updating LinuxNodeConfig", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf("Error while waiting to update LinuxNodeConfig: {{err}}", err) + } + } + if err := resourceContainerClusterRead(d, meta); err != nil { return err } @@ -4180,6 +4209,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s node pool auto config resource manager tags have been updated", d.Id()) } + if d.HasChange("node_pool_auto_config.0.linux_node_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigLinuxNodeConfig: expandLinuxNodeConfig( + d.Get("node_pool_auto_config.0.linux_node_config"), + ), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config linux node 
config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config linux_node_config parameters have been updated", d.Id()) + } + d.Partial(false) if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -6224,6 +6271,11 @@ func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]int if c.ResourceManagerTags != nil { result["resource_manager_tags"] = flattenResourceManagerTags(c.ResourceManagerTags) } + if c.LinuxNodeConfig != nil { + result["linux_node_config"] = []map[string]interface{}{ + {"cgroup_mode": c.LinuxNodeConfig.CgroupMode}, + } + } return []map[string]interface{}{result} } diff --git a/google/services/container/resource_container_cluster_test.go b/google/services/container/resource_container_cluster_test.go index cee2517df88..50eedbb154c 100644 --- a/google/services/container/resource_container_cluster_test.go +++ b/google/services/container/resource_container_cluster_test.go @@ -11631,3 +11631,79 @@ resource "google_container_cluster" "primary" { } }`, name, enabled) } + +func TestAccContainerCluster_withCgroupMode(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withCgroupMode(clusterName, "CGROUP_MODE_V2"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_pool_auto_config.0.linux_node_config.0.cgroup_mode"), + resource.TestCheckResourceAttr("google_container_cluster.primary", 
"node_pool_auto_config.0.linux_node_config.0.cgroup_mode", "CGROUP_MODE_V2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withCgroupModeUpdate(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autopilot_minimal(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withCgroupMode(clusterName, "CGROUP_MODE_V2"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_pool_auto_config.0.linux_node_config.0.cgroup_mode"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "node_pool_auto_config.0.linux_node_config.0.cgroup_mode", "CGROUP_MODE_V2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withCgroupMode(name string, cgroupMode string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + enable_autopilot = true + deletion_protection = false + node_pool_auto_config { + linux_node_config { + cgroup_mode = "%s" + } + } +} + `, name, cgroupMode) +} diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index 
368c1b2a441..13ba10f4bde 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -1110,6 +1110,8 @@ Structure is [documented below](#nested_node_kubelet_config). * `network_tags` (Optional) - The network tag config for the cluster's automatically provisioned node pools. Structure is [documented below](#nested_network_tags). +* `linux_node_config` (Optional) - Linux system configuration for the cluster's automatically provisioned node pools. Only the `cgroup_mode` field is supported in `node_pool_auto_config`. Structure is [documented below](#nested_linux_node_config). + The `node_kubelet_config` block supports: * `insecure_kubelet_readonly_port_enabled` - (Optional) Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.