From eb2bb96a395c1d16bd89b24ccd6f1a9d462917fc Mon Sep 17 00:00:00 2001
From: Taylor Ludwig
Date: Fri, 8 Nov 2019 16:02:21 -0800
Subject: [PATCH 1/6] Fix node_count when autoscaling disabled on node pool.

Fixes #311

Don't set initial_node_count when autoscaling is disabled on the node pool.
Use the new node pool var when setting the desired size of the pool - matches
the provider var.
---
 autogen/cluster.tf                                     | 10 ++++++----
 cluster.tf                                             | 10 ++++++----
 modules/beta-private-cluster-update-variant/cluster.tf | 10 ++++++----
 modules/beta-private-cluster/cluster.tf                | 10 ++++++----
 modules/beta-public-cluster/cluster.tf                 | 10 ++++++----
 modules/private-cluster-update-variant/cluster.tf      | 10 ++++++----
 modules/private-cluster/cluster.tf                     | 10 ++++++----
 7 files changed, 42 insertions(+), 28 deletions(-)

diff --git a/autogen/cluster.tf b/autogen/cluster.tf
index 0bac34a37e..7353468cd7 100644
--- a/autogen/cluster.tf
+++ b/autogen/cluster.tf
@@ -316,16 +316,18 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
   {% if beta_cluster %}
   max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null)
   {% endif %}
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
diff --git a/cluster.tf b/cluster.tf
index 7e5f53ee47..072a60fb14 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -137,13 +137,15 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf
index 6039cecd98..c92695ce49 100644
--- a/modules/beta-private-cluster-update-variant/cluster.tf
+++ b/modules/beta-private-cluster-update-variant/cluster.tf
@@ -290,14 +290,16 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
   max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null)
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf
index 10e12a9ba0..fb9b1ee683 100644
--- a/modules/beta-private-cluster/cluster.tf
+++ b/modules/beta-private-cluster/cluster.tf
@@ -218,14 +218,16 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
   max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null)
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf
index b5f896bc1b..88725475a8 100644
--- a/modules/beta-public-cluster/cluster.tf
+++ b/modules/beta-public-cluster/cluster.tf
@@ -213,14 +213,16 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
   max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null)
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf
index 615fe84bcc..d7fc2dd736 100644
--- a/modules/private-cluster-update-variant/cluster.tf
+++ b/modules/private-cluster-update-variant/cluster.tf
@@ -214,13 +214,15 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf
index 3c42e64325..c8051255bf 100644
--- a/modules/private-cluster/cluster.tf
+++ b/modules/private-cluster/cluster.tf
@@ -142,13 +142,15 @@ resource "google_container_node_pool" "pools" {
     "version",
     local.node_version,
   )
-  initial_node_count = lookup(
+
+  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
     var.node_pools[count.index],
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1),
-  )
+    lookup(var.node_pools[count.index], "min_count", 1)
+  ) : null
+
 
-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1)
+  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
 
   dynamic "autoscaling" {
     for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
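
In isolation, the change above means initial_node_count is only derived (from initial_node_count, falling back to min_count) while autoscaling is enabled, and a pool with autoscaling disabled takes its size from the new node_count key instead. A small sketch of the lookup/ternary pattern evaluated against a sample pool map follows; the locals block and sample values are illustrative assumptions, not module code, and the string values simply mirror how entries arrive through the module's node_pools variable:

locals {
  # Sample pool entry with autoscaling turned off.
  pool = {
    name        = "fixed-pool"
    autoscaling = "false"
    node_count  = "2"
  }

  # The same expressions as the patched cluster.tf, with local.pool standing
  # in for var.node_pools[count.index]: for this pool, initial_node_count
  # resolves to null and node_count to "2"; with autoscaling omitted
  # (default true), initial_node_count gets a value and node_count is null.
  initial_node_count = lookup(local.pool, "autoscaling", true) ? lookup(local.pool, "initial_node_count", lookup(local.pool, "min_count", 1)) : null
  node_count         = lookup(local.pool, "autoscaling", true) ? null : lookup(local.pool, "node_count", 1)
}
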
From 8f231589e5fcc7e2682ca838bc11f225374b57d2 Mon Sep 17 00:00:00 2001
From: Taylor Ludwig
Date: Fri, 8 Nov 2019 16:03:09 -0800
Subject: [PATCH 2/6] update test and example to show usage of a node pool
 with autoscaling off
---
 examples/node_pool/main.tf                    | 12 +++
 test/integration/node_pool/controls/gcloud.rb | 97 ++++++++++++++++++-
 .../integration/node_pool/controls/kubectl.rb | 15 +++
 3 files changed, 122 insertions(+), 2 deletions(-)

diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf
index c7a7f852ae..17b3bf8f67 100644
--- a/examples/node_pool/main.tf
+++ b/examples/node_pool/main.tf
@@ -59,12 +59,20 @@ module "gke" {
       auto_repair = false
       service_account = var.compute_engine_service_account
     },
+    {
+      name = "pool-03"
+      autoscaling = false
+      node_count = 2
+      service_account = var.compute_engine_service_account
+      auto_upgrade = true
+    },
   ]
 
   node_pools_oauth_scopes = {
     all     = []
     pool-01 = []
     pool-02 = []
+    pool-03 = []
   }
 
   node_pools_metadata = {
@@ -73,6 +81,7 @@ module "gke" {
       shutdown-script = file("${path.module}/data/shutdown-script.sh")
     }
     pool-02 = {}
+    pool-03 = {}
   }
 
   node_pools_labels = {
@@ -83,6 +92,7 @@ module "gke" {
       pool-01-example = true
     }
     pool-02 = {}
+    pool-03 = {}
   }
 
   node_pools_taints = {
@@ -101,6 +111,7 @@ module "gke" {
       },
     ]
     pool-02 = []
+    pool-03 = []
   }
 
   node_pools_tags = {
@@ -111,6 +122,7 @@ module "gke" {
       "pool-01-example",
     ]
     pool-02 = []
+    pool-03 = []
   }
 
 }
diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb
index 6ff5fdd201..675a2e39ae 100644
--- a/test/integration/node_pool/controls/gcloud.rb
+++ b/test/integration/node_pool/controls/gcloud.rb
@@ -36,8 +36,8 @@
     describe "node pools" do
       let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } }
 
-      it "has 2" do
-        expect(node_pools.count).to eq 2
+      it "has 3" do
+        expect(node_pools.count).to eq 3
       end
 
       describe "pool-01" do
@@ -279,6 +279,99 @@
           )
         end
       end
+
+      describe "pool-03" do
+        it "exists" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+            )
+          )
+        end
+
+        it "is the expected machine type" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+              "config" => including(
+                "machineType" => "n1-standard-2",
+              ),
+            )
+          )
+        end
+
+        it "has autoscaling disabled" do
+          expect(data['nodePools']).not_to include(
+            including(
+              "name" => "pool-03",
+              "autoscaling" => including(
+                "enabled" => true,
+              ),
+            )
+          )
+        end
+
+        it "has the expected node count" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+              "initialNodeCount" => 2
+            )
+          )
+        end
+
+        it "has autorepair enabled" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+              "management" => including(
+                "autoRepair" => true,
+              ),
+            )
+          )
+        end
+
+        it "has automatic upgrades enabled" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+              "management" => including(
+                "autoUpgrade" => true,
+              ),
+            )
+          )
+        end
+
+        it "has the expected labels" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+              "config" => including(
+                "labels" => {
+                  "all-pools-example" => "true",
+                  "cluster_name" => cluster_name,
+                  "node_pool" => "pool-03",
+                },
+              ),
+            )
+          )
+        end
+
+        it "has the expected network tags" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-03",
+              "config" => including(
+                "tags" => match_array([
+                  "all-node-example",
+                  "gke-#{cluster_name}",
+                  "gke-#{cluster_name}-pool-03",
+                ]),
+              ),
+            )
+          )
+        end
+      end
     end
   end
 end
diff --git a/test/integration/node_pool/controls/kubectl.rb b/test/integration/node_pool/controls/kubectl.rb
index 471f9cb33f..811ebcda0f 100644
--- a/test/integration/node_pool/controls/kubectl.rb
+++ b/test/integration/node_pool/controls/kubectl.rb
@@ -72,6 +72,21 @@
       all_nodes.select { |n| n.metadata.labels.node_pool == "pool-02" }
     end
 
+    it "has the expected taints" do
+      expect(taints).to include(
+        {
+          effect: "PreferNoSchedule",
+          key: "all-pools-example",
+          value: "true",
+        }
+      )
+    end
+  end
+  describe "pool-03" do
+    let(:nodes) do
+      all_nodes.select { |n| n.metadata.labels.node_pool == "pool-03" }
+    end
+
     it "has the expected taints" do
       expect(taints).to include(
         {
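
Patch 1 plus the example above give the intended consumer-facing pattern: disable autoscaling on a pool entry and size it with node_count. A trimmed-down sketch of such a configuration follows; the module source, cluster name, and surrounding variables are illustrative assumptions, and only the pool entry itself is taken from the example above:

module "gke" {
  # Registry source shown for illustration; pin the version that matches your setup.
  source = "terraform-google-modules/kubernetes-engine/google"

  project_id        = var.project_id
  name              = "fixed-pool-cluster"
  region            = var.region
  network           = var.network
  subnetwork        = var.subnetwork
  ip_range_pods     = var.ip_range_pods
  ip_range_services = var.ip_range_services

  node_pools = [
    {
      # With autoscaling = false the module now leaves initial_node_count
      # unset and passes node_count straight through to the provider.
      name            = "pool-03"
      autoscaling     = false
      node_count      = 2
      service_account = var.compute_engine_service_account
      auto_upgrade    = true
    },
  ]
}
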
From 23f30582aff5c271154bc1a4ed26b39af1163221 Mon Sep 17 00:00:00 2001
From: Taylor Ludwig
Date: Fri, 8 Nov 2019 16:21:26 -0800
Subject: [PATCH 3/6] fix formatting
---
 examples/node_pool/main.tf | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf
index 17b3bf8f67..bf892be2a9 100644
--- a/examples/node_pool/main.tf
+++ b/examples/node_pool/main.tf
@@ -60,9 +60,9 @@ module "gke" {
       service_account = var.compute_engine_service_account
     },
     {
-      name = "pool-03"
-      autoscaling = false
-      node_count = 2
+      name            = "pool-03"
+      autoscaling     = false
+      node_count      = 2
       service_account = var.compute_engine_service_account
       auto_upgrade = true
     },

From 7749bb654b77521378033888e6d5300a9ac65e50 Mon Sep 17 00:00:00 2001
From: Taylor Ludwig
Date: Mon, 11 Nov 2019 11:23:52 -0800
Subject: [PATCH 4/6] force codebuild
From f50930d94beb2dc4c3aa43c51f051803aa66dd9e Mon Sep 17 00:00:00 2001
From: Taylor Ludwig
Date: Tue, 12 Nov 2019 14:33:23 -0800
Subject: [PATCH 5/6] add to changelog
---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ba31673202..226520c116 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ Extending the adopted spec, each change should have a link to its corresponding
 
 * Support for Shielded Nodes beta feature via `enabled_shielded_nodes` variable. [#300]
 * Support for setting node_locations on node pools. [#303]
+* Fix for specifying `node_count` on node pools when autoscaling is disabled. [#313]
 
 ## [v5.1.1] - 2019-10-25
 
From 304b8f1872f1e0370ea066ecb36b0aebae55e490 Mon Sep 17 00:00:00 2001
From: Taylor Ludwig
Date: Tue, 12 Nov 2019 14:34:28 -0800
Subject: [PATCH 6/6] move node_pool example to us-central1 where there is cpu
 quota available

After merging with master, us-east4 maxes out the default quota of 72 CPUs
in the region.
---
 examples/node_pool/main.tf                    | 2 +-
 test/fixtures/shared/variables.tf             | 5 ++---
 test/integration/node_pool/controls/gcloud.rb | 4 ++--
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf
index ad8d076f98..5bc0f53407 100644
--- a/examples/node_pool/main.tf
+++ b/examples/node_pool/main.tf
@@ -60,7 +60,7 @@ module "gke" {
     },
     {
       name = "pool-03"
-      node_locations = "us-east4-b,us-east4-c"
+      node_locations = "${var.region}-b,${var.region}-c"
       autoscaling = false
       node_count = 2
       machine_type = "n1-standard-2"
diff --git a/test/fixtures/shared/variables.tf b/test/fixtures/shared/variables.tf
index 5dff24dbd4..9760d65a94 100644
--- a/test/fixtures/shared/variables.tf
+++ b/test/fixtures/shared/variables.tf
@@ -20,13 +20,13 @@ variable "project_id" {
 
 variable "region" {
   description = "The GCP region to create and test resources in"
-  default = "us-east4"
+  default = "us-central1"
 }
 
 variable "zones" {
   type = list(string)
   description = "The GCP zones to create and test resources in, for applicable tests"
-  default = ["us-east4-a", "us-east4-b", "us-east4-c"]
+  default = ["us-central1-a", "us-central1-b", "us-central1-c"]
 }
 
 variable "compute_engine_service_account" {
@@ -36,4 +36,3 @@ variable "compute_engine_service_account" {
 variable "registry_project_id" {
   description = "Project to use for granting access to the GCR registry, if requested"
 }
-
diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb
index 2c6ed4f648..69a15e8293 100644
--- a/test/integration/node_pool/controls/gcloud.rb
+++ b/test/integration/node_pool/controls/gcloud.rb
@@ -392,8 +392,8 @@
            including(
              "name" => "pool-03",
              "locations" => match_array([
-               "us-east4-b",
-               "us-east4-c",
+               "us-central1-b",
+               "us-central1-c",
              ]),
            )
          )
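
When regional CPU quota is the constraint, the shared fixture defaults above can also be overridden per run instead of edited in place. A terraform.tfvars sketch; the file name and the override approach are illustrative, while the variable names and values come from test/fixtures/shared/variables.tf:

# terraform.tfvars (illustrative): override the shared fixture defaults
# without editing variables.tf.
region = "us-central1"
zones  = ["us-central1-a", "us-central1-b", "us-central1-c"]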