Skip to content

Commit

Permalink
Add support for cgroup_mode in node_pool_auto_config (#12382) (#20460)
Browse files Browse the repository at this point in the history
[upstream:c3694fbae88f0857de97ab135cb75c6104ed524a]

Signed-off-by: Modular Magician <[email protected]>
  • Loading branch information
modular-magician authored Nov 22, 2024
1 parent 03714e5 commit fee773f
Show file tree
Hide file tree
Showing 5 changed files with 156 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .changelog/12382.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
container: added `node_pool_auto_config.linux_node_config.cgroup_mode` field to `google_container_cluster` resource
```
23 changes: 23 additions & 0 deletions google/services/container/node_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -787,6 +787,29 @@ func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema {
}
}

// Separate since this currently only supports a single value -- a subset of
// the overall LinuxNodeConfig.
func schemaNodePoolAutoConfigLinuxNodeConfig() *schema.Schema {
	// Only cgroup_mode is exposed under node_pool_auto_config; suppress diffs
	// when the API reports the unspecified default so an unset field stays clean.
	cgroupMode := &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		Computed: true,
		ValidateFunc: validation.StringInSlice(
			[]string{"CGROUP_MODE_UNSPECIFIED", "CGROUP_MODE_V1", "CGROUP_MODE_V2"},
			false,
		),
		Description:      `cgroupMode specifies the cgroup mode to be used on the node.`,
		DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("CGROUP_MODE_UNSPECIFIED"),
	}

	return &schema.Schema{
		Type:        schema.TypeList,
		Optional:    true,
		MaxItems:    1,
		Description: `Linux node configuration options.`,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"cgroup_mode": cgroupMode,
			},
		},
	}
}

func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults {
configs := configured.([]interface{})
if len(configs) == 0 || configs[0] == nil {
Expand Down
52 changes: 52 additions & 0 deletions google/services/container/resource_container_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -1363,6 +1363,7 @@ func ResourceContainerCluster() *schema.Resource {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"node_kubelet_config": schemaNodePoolAutoConfigNodeKubeletConfig(),
"linux_node_config": schemaNodePoolAutoConfigLinuxNodeConfig(),
"network_tags": {
Type: schema.TypeList,
Optional: true,
Expand Down Expand Up @@ -2596,6 +2597,34 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
}
}

if linuxNodeConfig, ok := d.GetOk("node_pool_auto_config.0.linux_node_config"); ok {
name := containerClusterFullName(project, location, clusterName)
req := &container.UpdateClusterRequest{
Update: &container.ClusterUpdate{
DesiredNodePoolAutoConfigLinuxNodeConfig: expandLinuxNodeConfig(linuxNodeConfig),
},
}

err = transport_tpg.Retry(transport_tpg.RetryOptions{
RetryFunc: func() error {
clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req)
if config.UserProjectOverride {
clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
}
op, err = clusterUpdateCall.Do()
return err
},
})
if err != nil {
return errwrap.Wrapf("Error updating LinuxNodeConfig: {{err}}", err)
}

err = ContainerOperationWait(config, op, project, location, "updating LinuxNodeConfig", userAgent, d.Timeout(schema.TimeoutCreate))
if err != nil {
return errwrap.Wrapf("Error while waiting to update LinuxNodeConfig: {{err}}", err)
}
}

if err := resourceContainerClusterRead(d, meta); err != nil {
return err
}
Expand Down Expand Up @@ -4180,6 +4209,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
log.Printf("[INFO] GKE cluster %s node pool auto config resource manager tags have been updated", d.Id())
}

if d.HasChange("node_pool_auto_config.0.linux_node_config") {
req := &container.UpdateClusterRequest{
Update: &container.ClusterUpdate{
DesiredNodePoolAutoConfigLinuxNodeConfig: expandLinuxNodeConfig(
d.Get("node_pool_auto_config.0.linux_node_config"),
),
},
}

updateF := updateFunc(req, "updating GKE cluster node pool auto config linux node config")
// Call update serially.
if err := transport_tpg.LockedCall(lockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] GKE cluster %s node pool auto config linux_node_config parameters have been updated", d.Id())
}

d.Partial(false)

if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil {
Expand Down Expand Up @@ -6224,6 +6271,11 @@ func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]int
if c.ResourceManagerTags != nil {
result["resource_manager_tags"] = flattenResourceManagerTags(c.ResourceManagerTags)
}
if c.LinuxNodeConfig != nil {
result["linux_node_config"] = []map[string]interface{}{
{"cgroup_mode": c.LinuxNodeConfig.CgroupMode},
}
}

return []map[string]interface{}{result}
}
Expand Down
76 changes: 76 additions & 0 deletions google/services/container/resource_container_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11631,3 +11631,79 @@ resource "google_container_cluster" "primary" {
}
}`, name, enabled)
}

// TestAccContainerCluster_withCgroupMode verifies that an autopilot cluster can
// be created with node_pool_auto_config.linux_node_config.cgroup_mode set, and
// that the resulting state survives an import round-trip.
func TestAccContainerCluster_withCgroupMode(t *testing.T) {
	t.Parallel()

	clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
	const resName = "google_container_cluster.primary"
	const attrPath = "node_pool_auto_config.0.linux_node_config.0.cgroup_mode"

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccContainerCluster_withCgroupMode(clusterName, "CGROUP_MODE_V2"),
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttrSet(resName, attrPath),
					resource.TestCheckResourceAttr(resName, attrPath, "CGROUP_MODE_V2"),
				),
			},
			{
				ResourceName:            resName,
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"deletion_protection"},
			},
		},
	})
}

// TestAccContainerCluster_withCgroupModeUpdate verifies that cgroup_mode can be
// set on an existing autopilot cluster that was originally created without a
// linux_node_config block, exercising the in-place update path.
func TestAccContainerCluster_withCgroupModeUpdate(t *testing.T) {
	t.Parallel()

	clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
	// The same import step runs after each apply.
	importStep := resource.TestStep{
		ResourceName:            "google_container_cluster.primary",
		ImportState:             true,
		ImportStateVerify:       true,
		ImportStateVerifyIgnore: []string{"deletion_protection"},
	}

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccContainerCluster_autopilot_minimal(clusterName),
			},
			importStep,
			{
				Config: testAccContainerCluster_withCgroupMode(clusterName, "CGROUP_MODE_V2"),
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_pool_auto_config.0.linux_node_config.0.cgroup_mode"),
					resource.TestCheckResourceAttr("google_container_cluster.primary", "node_pool_auto_config.0.linux_node_config.0.cgroup_mode", "CGROUP_MODE_V2"),
				),
			},
			importStep,
		},
	})
}

// testAccContainerCluster_withCgroupMode returns an autopilot cluster config
// with node_pool_auto_config.linux_node_config.cgroup_mode set to cgroupMode.
func testAccContainerCluster_withCgroupMode(name string, cgroupMode string) string {
	const tmpl = `
resource "google_container_cluster" "primary" {
  name                = "%s"
  enable_autopilot    = true
  deletion_protection = false
  node_pool_auto_config {
    linux_node_config {
      cgroup_mode = "%s"
    }
  }
}
`
	return fmt.Sprintf(tmpl, name, cgroupMode)
}
2 changes: 2 additions & 0 deletions website/docs/r/container_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -1110,6 +1110,8 @@ Structure is [documented below](#nested_node_kubelet_config).

* `network_tags` (Optional) - The network tag config for the cluster's automatically provisioned node pools. Structure is [documented below](#nested_network_tags).

* `linux_node_config` (Optional) - Linux system configuration for the cluster's automatically provisioned node pools. Only the `cgroup_mode` field is supported in `node_pool_auto_config`. Structure is [documented below](#nested_linux_node_config).

<a name="nested_node_kubelet_config"></a>The `node_kubelet_config` block supports:

* `insecure_kubelet_readonly_port_enabled` - (Optional) Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
Expand Down

0 comments on commit fee773f

Please sign in to comment.