Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for Capacity Intelligence Autoscaling #12453

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .changelog/6370.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
```release-note:enhancement
container: added `autoscaling.total_min_node_count`, `autoscaling.total_max_node_count`, and `autoscaling.location_policy` to `google_container_node_pool` resource
```
```release-note:enhancement
container: added `autoscaling.total_min_node_count`, `autoscaling.total_max_node_count`, and `autoscaling.location_policy` to `google_container_cluster.node_pool`
```
117 changes: 117 additions & 0 deletions google/resource_container_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1195,6 +1195,68 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) {
})
}

// TestAccContainerCluster_withNodePoolCIA exercises Capacity Intelligence
// Autoscaling (total_min/total_max node counts plus location_policy) on a
// node pool defined inline in a google_container_cluster: create with
// BALANCED policy, update to ANY, then drop the autoscaling block entirely.
func TestAccContainerCluster_withNodePoolCIA(t *testing.T) {
	t.Parallel()

	clusterName := fmt.Sprintf("tf-test-cluster-nodepool-%s", randString(t, 10))
	npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", randString(t, 10))

	// Identical import-verification step that follows every config step.
	importStep := resource.TestStep{
		ResourceName:            "google_container_cluster.with_node_pool",
		ImportState:             true,
		ImportStateVerify:       true,
		ImportStateVerifyIgnore: []string{"min_master_version"},
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				// Create with total limits; per-zone limits must read back as 0.
				Config: testAccContainerRegionalCluster_withNodePoolCIA(clusterName, npName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "0"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "0"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_min_node_count", "3"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_max_node_count", "21"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.location_policy", "BALANCED"),
				),
			},
			importStep,
			{
				// Update the total limits and switch the location policy to ANY.
				Config: testAccContainerRegionalClusterUpdate_withNodePoolCIA(clusterName, npName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "0"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "0"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_min_node_count", "4"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_max_node_count", "32"),
					resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.location_policy", "ANY"),
				),
			},
			importStep,
			{
				// Remove autoscaling; none of the limit attributes should remain.
				Config: testAccContainerRegionalCluster_withNodePoolBasic(clusterName, npName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"),
					resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count"),
					resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_min_node_count"),
					resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_max_node_count"),
				),
			},
			importStep,
		},
	})
}

func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) {
// Randomness
skipIfVcr(t)
Expand Down Expand Up @@ -3730,6 +3792,61 @@ resource "google_container_cluster" "with_node_pool" {
`, cluster, np)
}

func testAccContainerRegionalCluster_withNodePoolCIA(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
location = "us-central1"
min_master_version = "1.24"

node_pool {
name = "%s"
initial_node_count = 2
autoscaling {
total_min_node_count = 3
total_max_node_count = 21
location_policy = "BALANCED"
}
}
}
`, cluster, np)
}

func testAccContainerRegionalClusterUpdate_withNodePoolCIA(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
location = "us-central1"
min_master_version = "1.24"

node_pool {
name = "%s"
initial_node_count = 2
autoscaling {
total_min_node_count = 4
total_max_node_count = 32
location_policy = "ANY"
}
}
}
`, cluster, np)
}

func testAccContainerRegionalCluster_withNodePoolBasic(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
location = "us-central1"
min_master_version = "1.24"

node_pool {
name = "%s"
initial_node_count = 2
}
}
`, cluster, nodePool)
}

func testAccContainerCluster_withNodePoolNamePrefix(cluster, npPrefix string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool_name_prefix" {
Expand Down
60 changes: 45 additions & 15 deletions google/resource_container_node_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,16 +84,37 @@ var schemaNodePool = map[string]*schema.Schema{
Schema: map[string]*schema.Schema{
"min_node_count": {
Type: schema.TypeInt,
Required: true,
Optional: true,
ValidateFunc: validation.IntAtLeast(0),
Description: `Minimum number of nodes in the NodePool. Must be >=0 and <= max_node_count.`,
Description: `Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.`,
},

"max_node_count": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: `Maximum number of nodes in the NodePool. Must be >= min_node_count.`,
Optional: true,
ValidateFunc: validation.IntAtLeast(0),
Description: `Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.`,
},

"total_min_node_count": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(0),
Description: `Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits.`,
},

"total_max_node_count": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(0),
Description: `Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.`,
},

"location_policy": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"BALANCED", "ANY"}, false),
Description: `Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs.`,
},
},
},
Expand Down Expand Up @@ -695,10 +716,13 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool,
if v, ok := d.GetOk(prefix + "autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
np.Autoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
ForceSendFields: []string{"MinNodeCount"},
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
TotalMinNodeCount: int64(autoscaling["total_min_node_count"].(int)),
TotalMaxNodeCount: int64(autoscaling["total_max_node_count"].(int)),
LocationPolicy: autoscaling["location_policy"].(string),
ForceSendFields: []string{"MinNodeCount", "MaxNodeCount", "TotalMinNodeCount", "TotalMaxNodeCount"},
}
}

Expand Down Expand Up @@ -787,8 +811,11 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP
if np.Autoscaling.Enabled {
nodePool["autoscaling"] = []map[string]interface{}{
{
"min_node_count": np.Autoscaling.MinNodeCount,
"max_node_count": np.Autoscaling.MaxNodeCount,
"min_node_count": np.Autoscaling.MinNodeCount,
"max_node_count": np.Autoscaling.MaxNodeCount,
"total_min_node_count": np.Autoscaling.TotalMinNodeCount,
"total_max_node_count": np.Autoscaling.TotalMaxNodeCount,
"location_policy": np.Autoscaling.LocationPolicy,
},
}
} else {
Expand Down Expand Up @@ -839,10 +866,13 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
if v, ok := d.GetOk(prefix + "autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
ForceSendFields: []string{"MinNodeCount"},
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
TotalMinNodeCount: int64(autoscaling["total_min_node_count"].(int)),
TotalMaxNodeCount: int64(autoscaling["total_max_node_count"].(int)),
LocationPolicy: autoscaling["location_policy"].(string),
ForceSendFields: []string{"MinNodeCount", "TotalMinNodeCount"},
}
} else {
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Expand Down
124 changes: 124 additions & 0 deletions google/resource_container_node_pool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -478,6 +478,63 @@ func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) {
})
}

// This test exists to validate a node pool created with total size limits *and* an update to them.
// Flow: create with total limits + BALANCED policy, update limits + ANY policy,
// then remove the autoscaling block and import-verify after each step.
func TestAccContainerNodePool_totalSize(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
np := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
Steps: []resource.TestStep{
{
// Create with total (cluster-wide) limits and BALANCED location policy.
Config: testAccContainerNodePool_totalSize(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "4"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_max_node_count", "12"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.location_policy", "BALANCED"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
},
{
// Update the total limits in place and switch the policy to ANY.
Config: testAccContainerNodePool_updateTotalSize(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "2"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_max_node_count", "22"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.location_policy", "ANY"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
},
{
// Remove the autoscaling block; per-zone limits must be absent too.
Config: testAccContainerNodePool_basicTotalSize(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"),
resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#"},
},
},
})
}

func TestAccContainerNodePool_autoscaling(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -1087,6 +1144,73 @@ resource "google_container_node_pool" "np" {
`, cluster, np)
}

func testAccContainerNodePool_totalSize(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1"
initial_node_count = 3
min_master_version = "1.24"
}

resource "google_container_node_pool" "np" {
name = "%s"
location = "us-central1"
cluster = google_container_cluster.cluster.name
initial_node_count = 2
autoscaling {
total_min_node_count = 4
total_max_node_count = 12
location_policy = "BALANCED"
}
}
`, cluster, np)
}

func testAccContainerNodePool_updateTotalSize(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1"
initial_node_count = 3
min_master_version = "1.24"
}

resource "google_container_node_pool" "np" {
name = "%s"
location = "us-central1"
cluster = google_container_cluster.cluster.name
initial_node_count = 2
autoscaling {
total_min_node_count = 2
total_max_node_count = 22
location_policy = "ANY"
}
}
`, cluster, np)
}

func testAccContainerNodePool_basicTotalSize(cluster, np string) string {
return fmt.Sprintf(`
provider "google" {
user_project_override = true
}
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1"
initial_node_count = 3
min_master_version = "1.24"
}

resource "google_container_node_pool" "np" {
name = "%s"
location = "us-central1"
cluster = google_container_cluster.cluster.name
initial_node_count = 2
}
`, cluster, np)
}

func testAccContainerNodePool_autoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
Expand Down
20 changes: 16 additions & 4 deletions website/docs/r/container_node_pool.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -171,12 +171,24 @@ cluster.
* `placement_policy` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html)) Specifies a custom placement policy for the
nodes.

<a name="nested_autoscaling"></a>The `autoscaling` block supports:
<a name="nested_autoscaling"></a>The `autoscaling` block supports (either total or per zone limits are required):

* `min_node_count` - (Required) Minimum number of nodes in the NodePool. Must be >=0 and
<= `max_node_count`.
* `min_node_count` - (Optional) Minimum number of nodes per zone in the NodePool.
Must be >=0 and <= `max_node_count`. Cannot be used with total limits.

* `max_node_count` - (Required) Maximum number of nodes in the NodePool. Must be >= min_node_count.
* `max_node_count` - (Optional) Maximum number of nodes per zone in the NodePool.
Must be >= `min_node_count`. Cannot be used with total limits.

* `total_min_node_count` - (Optional) Total minimum number of nodes in the NodePool.
Must be >=0 and <= `total_max_node_count`. Cannot be used with per zone limits.

* `total_max_node_count` - (Optional) Total maximum number of nodes in the NodePool.
Must be >= `total_min_node_count`. Cannot be used with per zone limits.

* `location_policy` - (Optional) Location policy specifies the algorithm used when scaling-up the node pool. \
"BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. \
"ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations,
and reduce preemption risk for Spot VMs.

<a name="nested_management"></a>The `management` block supports:

Expand Down