From b3404152c01d9309747f5b510ccb9c80c3d39795 Mon Sep 17 00:00:00 2001 From: "Julian V. Modesto" Date: Thu, 29 Aug 2019 11:45:25 -0400 Subject: [PATCH 01/82] Check null and length of beta feature outputs --- autogen/main.tf | 4 ++-- modules/beta-private-cluster/main.tf | 4 ++-- modules/beta-public-cluster/main.tf | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/autogen/main.tf b/autogen/main.tf index c4b070f9a3..0f5b0edc06 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -105,9 +105,9 @@ locals { {% if beta_cluster %} # BETA features cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? 
google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features {% endif %} diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index ad6116e7ef..1199f2dfa6 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -94,9 +94,9 @@ locals { # BETA features cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? 
google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index c956463414..e4e7548a3a 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -94,9 +94,9 @@ locals { # BETA features cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features From 979836cff2f72c2fb707da724b558f3e2195c94d Mon Sep 17 00:00:00 2001 From: "Julian V. 
Modesto" Date: Thu, 29 Aug 2019 13:08:46 -0400 Subject: [PATCH 02/82] Fix autogen module source --- autogen/README.md | 2 +- helpers/generate_modules/generate_modules.py | 3 +++ modules/beta-private-cluster/README.md | 2 +- modules/beta-public-cluster/README.md | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/autogen/README.md b/autogen/README.md index 620aa422c5..73a6314289 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -28,7 +28,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google{% if private_cluster %}//modules/private-cluster{% endif %}" + source = "terraform-google-modules/kubernetes-engine/google{{ module_path }}" project_id = "" name = "gke-test-1" region = "us-central1" diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index f6beb84832..c235e7ad65 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -46,13 +46,16 @@ def template_options(self, base): 'private_cluster': False, }), Module("./modules/private-cluster", { + 'module_path': '//modules/private-cluster', 'private_cluster': True }), Module("./modules/beta-private-cluster", { + 'module_path': '//modules/beta-private-cluster', 'private_cluster': True, 'beta_cluster': True, }), Module("./modules/beta-public-cluster", { + 'module_path': '//modules/beta-public-cluster', 'private_cluster': False, 'beta_cluster': True, }), diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 74bd64c022..dbc69c4d14 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -25,7 +25,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster" + 
source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster" project_id = "" name = "gke-test-1" region = "us-central1" diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 316f46d43e..db41d10821 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -23,7 +23,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google" + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster" project_id = "" name = "gke-test-1" region = "us-central1" From 9bb2f29923beeb88839f68300ed484ae75df729a Mon Sep 17 00:00:00 2001 From: "Julian V. Modesto" Date: Thu, 29 Aug 2019 13:11:52 -0400 Subject: [PATCH 03/82] Set cluster initial node count from var --- autogen/cluster.tf | 1 + cluster.tf | 1 + modules/beta-private-cluster/cluster.tf | 1 + modules/beta-public-cluster/cluster.tf | 1 + modules/private-cluster/cluster.tf | 1 + 5 files changed, 5 insertions(+) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 9ae4414a18..17f6653ad9 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -152,6 +152,7 @@ resource "google_container_cluster" "primary" { delete = "30m" } + initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/cluster.tf b/cluster.tf index 0f94ddb0e5..62766e122a 100644 --- a/cluster.tf +++ b/cluster.tf @@ -108,6 +108,7 @@ resource "google_container_cluster" "primary" { delete = "30m" } + initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 901ae674eb..6fce4d8598 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ 
-144,6 +144,7 @@ resource "google_container_cluster" "primary" { delete = "30m" } + initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 1c8561344a..c36f99c596 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -144,6 +144,7 @@ resource "google_container_cluster" "primary" { delete = "30m" } + initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index a5bb5da979..5954739abd 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -108,6 +108,7 @@ resource "google_container_cluster" "primary" { delete = "30m" } + initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count From 10be6f3c523653bf69c81c8f9b59cd8e4554185f Mon Sep 17 00:00:00 2001 From: "Julian V. 
Modesto" Date: Wed, 4 Sep 2019 10:45:38 -0400 Subject: [PATCH 04/82] Ignore changes to initial node count --- autogen/cluster.tf | 3 +-- cluster.tf | 3 +-- modules/beta-private-cluster/cluster.tf | 3 +-- modules/beta-public-cluster/cluster.tf | 3 +-- modules/private-cluster/cluster.tf | 3 +-- 5 files changed, 5 insertions(+), 10 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 17f6653ad9..4e5fd74d55 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -143,7 +143,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -152,7 +152,6 @@ resource "google_container_cluster" "primary" { delete = "30m" } - initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/cluster.tf b/cluster.tf index 62766e122a..ffdb27b0fc 100644 --- a/cluster.tf +++ b/cluster.tf @@ -99,7 +99,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -108,7 +108,6 @@ resource "google_container_cluster" "primary" { delete = "30m" } - initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 6fce4d8598..c481c69a35 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -135,7 +135,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -144,7 +144,6 @@ resource "google_container_cluster" "primary" { delete = "30m" } - initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git 
a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index c36f99c596..a264e932b9 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -135,7 +135,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -144,7 +144,6 @@ resource "google_container_cluster" "primary" { delete = "30m" } - initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 5954739abd..412e8295ed 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -99,7 +99,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -108,7 +108,6 @@ resource "google_container_cluster" "primary" { delete = "30m" } - initial_node_count = var.initial_node_count node_pool { name = "default-pool" initial_node_count = var.initial_node_count From 90fc5092e029d24ea4f94ad0b21079e936fd05f6 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Wed, 4 Sep 2019 15:10:02 -0700 Subject: [PATCH 05/82] Allow a node pool to be created before it is destroyed --- autogen/cluster.tf | 20 +++++++++++++++++++- cluster.tf | 20 +++++++++++++++++++- modules/beta-private-cluster/cluster.tf | 20 +++++++++++++++++++- modules/beta-public-cluster/cluster.tf | 20 +++++++++++++++++++- modules/private-cluster/cluster.tf | 20 +++++++++++++++++++- 5 files changed, 95 insertions(+), 5 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 9ae4414a18..2bea962897 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -219,6 +219,23 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node 
pools *****************************************/ +resource "random_id" "name" { + # if any node_pool definition has a create_before_destroy key, then create random_id names + count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 + + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + + keepers = { + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + } +} + resource "google_container_node_pool" "pools" { {% if beta_cluster %} provider = google-beta @@ -226,7 +243,7 @@ resource "google_container_node_pool" "pools" { provider = google {% endif %} count = length(var.node_pools) - name = var.node_pools[count.index]["name"] + name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -342,6 +359,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } timeouts { diff --git a/cluster.tf b/cluster.tf index 0f94ddb0e5..6bb872392c 100644 --- a/cluster.tf +++ b/cluster.tf @@ -124,10 +124,27 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +resource "random_id" "name" { + # if any node_pool definition has a create_before_destroy key, then create random_id names + count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 + + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + + keepers = { + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + } +} + resource "google_container_node_pool" "pools" { provider = google count = length(var.node_pools) - name = var.node_pools[count.index]["name"] + name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -217,6 +234,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } timeouts { diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 901ae674eb..9487ad7262 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -205,10 +205,27 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +resource "random_id" "name" { + # if any node_pool definition has a create_before_destroy key, then create random_id names + count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 + + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + + keepers = { + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + } +} + resource "google_container_node_pool" "pools" { provider = google-beta count = length(var.node_pools) - name = var.node_pools[count.index]["name"] + name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -318,6 +335,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } timeouts { diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 1c8561344a..172214cb77 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -200,10 +200,27 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +resource "random_id" "name" { + # if any node_pool definition has a create_before_destroy key, then create random_id names + count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 + + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + + keepers = { + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + } +} + resource "google_container_node_pool" "pools" { provider = google-beta count = length(var.node_pools) - name = var.node_pools[count.index]["name"] + name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -313,6 +330,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } timeouts { diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index a5bb5da979..2e92c026fb 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -129,10 +129,27 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +resource "random_id" "name" { + # if any node_pool definition has a create_before_destroy key, then create random_id names + count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 + + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + + keepers = { + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + } +} + resource "google_container_node_pool" "pools" { provider = google count = length(var.node_pools) - name = var.node_pools[count.index]["name"] + name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -222,6 +239,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } timeouts { From 0832ba048fb922626d43d941d2b0bac3cb9c6b60 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Thu, 5 Sep 2019 09:45:01 -0700 Subject: [PATCH 06/82] simplify count for random_id.name since random_id is a safe op --- autogen/cluster.tf | 4 +--- cluster.tf | 4 +--- modules/beta-private-cluster/cluster.tf | 4 +--- modules/beta-public-cluster/cluster.tf | 4 +--- modules/private-cluster/cluster.tf | 4 +--- 5 files changed, 5 insertions(+), 15 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 2bea962897..fcf0e79132 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -220,9 +220,7 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "random_id" "name" { - # if any node_pool definition has a create_before_destroy key, then create random_id names - count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? 
length(var.node_pools) : 0 - + count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) diff --git a/cluster.tf b/cluster.tf index 6bb872392c..2338452945 100644 --- a/cluster.tf +++ b/cluster.tf @@ -125,9 +125,7 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "random_id" "name" { - # if any node_pool definition has a create_before_destroy key, then create random_id names - count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 - + count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 9487ad7262..87297eefee 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -206,9 +206,7 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "random_id" "name" { - # if any node_pool definition has a create_before_destroy key, then create random_id names - count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? 
length(var.node_pools) : 0 - + count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 172214cb77..bfbbd44356 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -201,9 +201,7 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "random_id" "name" { - # if any node_pool definition has a create_before_destroy key, then create random_id names - count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? length(var.node_pools) : 0 - + count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 2e92c026fb..a91fa1051e 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -130,9 +130,7 @@ resource "google_container_cluster" "primary" { Create Container Cluster node pools *****************************************/ resource "random_id" "name" { - # if any node_pool definition has a create_before_destroy key, then create random_id names - count = length(compact([for node_pool in var.node_pools : lookup(node_pool, "create_before_destroy", "")])) > 0 ? 
length(var.node_pools) : 0 - + count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) From ecfb001bda98841cfc80fd64d6b16076d9df38ad Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Thu, 5 Sep 2019 16:50:52 -0700 Subject: [PATCH 07/82] Track all resources in keepers which are specified by ForceNew in node config schema --- autogen/cluster.tf | 55 ++++++++++++++++++++---- cluster.tf | 57 ++++++++++++++++++++----- modules/beta-private-cluster/cluster.tf | 57 ++++++++++++++++++++----- modules/beta-public-cluster/cluster.tf | 57 ++++++++++++++++++++----- modules/private-cluster/cluster.tf | 57 ++++++++++++++++++++----- 5 files changed, 234 insertions(+), 49 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index fcf0e79132..6b8c66dfa2 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -219,19 +219,56 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - - keepers = { - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - } + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper 
in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) + }, + { + labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) + }, + { + metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + }, + { + metadata_node_pool = join(",", + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + }, + { + oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) + }, + { + oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) + }, + { + tags_all = join(",", var.node_pools_tags["all"]) + }, + { + tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + } + ) } resource "google_container_node_pool" "pools" { diff --git a/cluster.tf b/cluster.tf index 2338452945..b09af57f55 100644 --- a/cluster.tf +++ b/cluster.tf @@ -124,19 +124,56 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - - keepers = { - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - 
local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - } + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) + }, + { + labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) + }, + { + metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + }, + { + metadata_node_pool = join(",", + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + }, + { + oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) + }, + { + oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) + }, + { + tags_all = join(",", var.node_pools_tags["all"]) + }, + { + tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + } + ) } resource "google_container_node_pool" "pools" { @@ -231,7 +268,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 87297eefee..dd27f7f5cf 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -205,19 +205,56 @@ resource "google_container_cluster" "primary" { 
/****************************************** Create Container Cluster node pools *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - - keepers = { - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - } + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) + }, + { + labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) + }, + { + metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + }, + { + metadata_node_pool = join(",", + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + }, + { + oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) + }, + { + oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) + }, + { + tags_all = join(",", var.node_pools_tags["all"]) + }, + { + 
tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + } + ) } resource "google_container_node_pool" "pools" { @@ -332,7 +369,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index bfbbd44356..bb239d81a3 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -200,19 +200,56 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - - keepers = { - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - } + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) + }, + { + labels_node_pool = join(",", 
keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) + }, + { + metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + }, + { + metadata_node_pool = join(",", + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + }, + { + oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) + }, + { + oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) + }, + { + tags_all = join(",", var.node_pools_tags["all"]) + }, + { + tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + } + ) } resource "google_container_node_pool" "pools" { @@ -327,7 +364,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index a91fa1051e..764b9b85ba 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -129,19 +129,56 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - - keepers = { - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], 
"disk_type", "pd-standard") - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - } + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) + }, + { + labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) + }, + { + metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + }, + { + metadata_node_pool = join(",", + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + }, + { + oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) + }, + { + oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) + }, + { + tags_all = join(",", var.node_pools_tags["all"]) + }, + { + tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + } + ) } resource "google_container_node_pool" "pools" { @@ -236,7 +273,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) } From 5ea28b5af9128839bc5691b33d720d501931e473 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Fri, 6 Sep 2019 11:41:36 -0700 Subject: [PATCH 08/82] templatize lifecycle block since interpolations are not allowed --- README.md | 1 
+ autogen/cluster.tf | 17 ++- autogen/dns.tf | 12 +- autogen/main.tf | 4 +- autogen/masq.tf | 3 +- autogen/outputs.tf | 3 +- autogen/variables.tf | 6 + cluster.tf | 114 +++++++++++++++++- dns.tf | 12 +- main.tf | 4 +- masq.tf | 3 +- modules/beta-private-cluster/README.md | 1 + modules/beta-private-cluster/cluster.tf | 134 +++++++++++++++++++++- modules/beta-private-cluster/dns.tf | 12 +- modules/beta-private-cluster/main.tf | 4 +- modules/beta-private-cluster/masq.tf | 3 +- modules/beta-private-cluster/outputs.tf | 3 +- modules/beta-private-cluster/variables.tf | 6 + modules/beta-public-cluster/README.md | 1 + modules/beta-public-cluster/cluster.tf | 134 +++++++++++++++++++++- modules/beta-public-cluster/dns.tf | 12 +- modules/beta-public-cluster/main.tf | 4 +- modules/beta-public-cluster/masq.tf | 3 +- modules/beta-public-cluster/outputs.tf | 3 +- modules/beta-public-cluster/variables.tf | 6 + modules/private-cluster/README.md | 1 + modules/private-cluster/cluster.tf | 114 +++++++++++++++++- modules/private-cluster/dns.tf | 12 +- modules/private-cluster/main.tf | 4 +- modules/private-cluster/masq.tf | 3 +- modules/private-cluster/outputs.tf | 3 +- modules/private-cluster/variables.tf | 6 + outputs.tf | 3 +- variables.tf | 6 + 34 files changed, 592 insertions(+), 65 deletions(-) diff --git a/README.md b/README.md index 923d3f7a09..40b3f02e86 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 6b8c66dfa2..a9599f19dc 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -271,14 +271,19 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools" { +{% for create_before_destroy_value in ["true", "false"] %} +resource "google_container_node_pool" "pools{{ loop.index0 }}" { {% if beta_cluster %} provider = google-beta {% else %} provider = google {% endif %} - count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + {% if create_before_destroy_value == "true" %} + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + {% else %} + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + {% endif %} + name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -394,7 +399,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) + create_before_destroy = {{ create_before_destroy_value }} } timeouts { @@ -404,6 +409,7 @@ resource "google_container_node_pool" "pools" { } } +{% endfor %} resource "null_resource" "wait_for_cluster" { provisioner "local-exec" { @@ -417,6 +423,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/autogen/dns.tf b/autogen/dns.tf index d9d4a35395..92a19d1a7d 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -29,7 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -58,7 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -85,7 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -115,6 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + 
google_container_node_pool.pools1, ] } diff --git a/autogen/main.tf b/autogen/main.tf index 0f5b0edc06..45d0d2a3f0 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -112,8 +112,8 @@ locals { # /BETA features {% endif %} - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/autogen/masq.tf b/autogen/masq.tf index 6deab757c3..a73a7cbfea 100644 --- a/autogen/masq.tf +++ b/autogen/masq.tf @@ -43,6 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/autogen/outputs.tf b/autogen/outputs.tf index ff8eab1bef..f5240baa46 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -53,7 +53,8 @@ output "endpoint" { * to be up. */ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/autogen/variables.tf b/autogen/variables.tf index 16c2955ced..2877bfbd4c 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -210,6 +210,12 @@ variable "node_pools_oauth_scopes" { } } +variable "node_pools_create_before_destroy" { + type = bool + description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" + default = false +} + variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/cluster.tf b/cluster.tf index b09af57f55..da08050943 100644 --- a/cluster.tf +++ b/cluster.tf @@ -176,10 +176,10 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools" { +resource "google_container_node_pool" "pools0" { provider = google - count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -269,7 +269,110 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "google_container_node_pool" "pools1" { + provider = google + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = 
lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = false } timeouts { @@ -292,6 +395,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/dns.tf b/dns.tf index b240a23e65..089b7fb99c 100644 --- a/dns.tf +++ b/dns.tf @@ -29,7 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -58,7 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -85,7 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -115,6 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + 
google_container_node_pool.pools1, ] } diff --git a/main.tf b/main.tf index b63d60f884..6a98b9fdae 100644 --- a/main.tf +++ b/main.tf @@ -81,8 +81,8 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/masq.tf b/masq.tf index b6e411fc42..b5c40bef4f 100644 --- a/masq.tf +++ b/masq.tf @@ -43,6 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index dbc69c4d14..e8bb4c72a3 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -179,6 +179,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index dd27f7f5cf..22794bcc7f 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -257,10 +257,10 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools" { +resource "google_container_node_pool" "pools0" { provider = google-beta - count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -370,7 +370,130 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "google_container_node_pool" "pools1" { + provider = google-beta + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], 
+ ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = false } timeouts { @@ -393,6 +516,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index b240a23e65..089b7fb99c 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -29,7 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, 
google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -58,7 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -85,7 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -115,6 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 1199f2dfa6..db40ea6147 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -100,8 +100,8 @@ locals { # /BETA features - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-private-cluster/masq.tf b/modules/beta-private-cluster/masq.tf index b6e411fc42..b5c40bef4f 100644 --- a/modules/beta-private-cluster/masq.tf +++ b/modules/beta-private-cluster/masq.tf @@ -43,6 +43,7 
@@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index 4153960069..7c2984c966 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -53,7 +53,8 @@ output "endpoint" { * to be up. */ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 975fe7a173..d57bba8830 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -208,6 +208,12 @@ variable "node_pools_oauth_scopes" { } } +variable "node_pools_create_before_destroy" { + type = bool + description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" + default = false +} + variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index db41d10821..09aa7beb54 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -170,6 +170,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index bb239d81a3..a425ce7da1 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -252,10 +252,10 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools" { +resource "google_container_node_pool" "pools0" { provider = google-beta - count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -365,7 +365,130 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "google_container_node_pool" "pools1" { + provider = google-beta + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + 
for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = false } timeouts { @@ -388,6 +511,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index b240a23e65..089b7fb99c 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -29,7 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -58,7 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -85,7 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -115,6 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - 
google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index e4e7548a3a..0d0b4c2986 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -100,8 +100,8 @@ locals { # /BETA features - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-public-cluster/masq.tf b/modules/beta-public-cluster/masq.tf index b6e411fc42..b5c40bef4f 100644 --- a/modules/beta-public-cluster/masq.tf +++ b/modules/beta-public-cluster/masq.tf @@ -43,6 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index 4153960069..7c2984c966 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -53,7 +53,8 @@ output "endpoint" { * to be up. 
*/ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 850f38ea83..53d98b7e02 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -208,6 +208,12 @@ variable "node_pools_oauth_scopes" { } } +variable "node_pools_create_before_destroy" { + type = bool + description = "Create a new node pool, then destroy the old node pool. Default behavior is destroy node pool then recreate it" + default = false +} + variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index c29d58ee93..9110164143 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -167,6 +167,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 764b9b85ba..2a4c280add 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -181,10 +181,10 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools" { +resource "google_container_node_pool" "pools0" { provider = google - count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "create_before_destroy", false) ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -274,7 +274,110 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = lookup(var.node_pools[count.index], "create_before_destroy", null) + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "google_container_node_pool" "pools1" { + provider = google + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = 
lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = false } timeouts { @@ -297,6 +400,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index b240a23e65..089b7fb99c 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -29,7 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -58,7 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -85,7 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, 
google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } @@ -115,6 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index bfe746401c..a646f036cb 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -81,8 +81,8 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/private-cluster/masq.tf b/modules/private-cluster/masq.tf index b6e411fc42..b5c40bef4f 100644 --- a/modules/private-cluster/masq.tf +++ b/modules/private-cluster/masq.tf @@ -43,6 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/private-cluster/outputs.tf b/modules/private-cluster/outputs.tf index dea7b5c7b5..a997ed3458 100644 --- a/modules/private-cluster/outputs.tf +++ b/modules/private-cluster/outputs.tf @@ -53,7 +53,8 @@ output 
"endpoint" { * to be up. */ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 8008e08975..134b399c96 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -198,6 +198,12 @@ variable "node_pools_oauth_scopes" { } } +variable "node_pools_create_before_destroy" { + type = bool + description = "Create a new node pool, then destroy the old node pool. Default behavior is destroy node pool then recreate it" + default = false +} + variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/outputs.tf b/outputs.tf index dea7b5c7b5..a997ed3458 100644 --- a/outputs.tf +++ b/outputs.tf @@ -53,7 +53,8 @@ output "endpoint" { * to be up. */ google_container_cluster.primary, - google_container_node_pool.pools, + google_container_node_pool.pools0, + google_container_node_pool.pools1, ] } diff --git a/variables.tf b/variables.tf index 460bdeaeff..4a90e1f6ef 100644 --- a/variables.tf +++ b/variables.tf @@ -198,6 +198,12 @@ variable "node_pools_oauth_scopes" { } } +variable "node_pools_create_before_destroy" { + type = bool + description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" + default = false +} + variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" From 38799421328fcaead526fa5749f1887c6989cf9e Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Fri, 6 Sep 2019 12:51:20 -0700 Subject: [PATCH 09/82] rename terraform resource names to more appropriate and descriptive names --- autogen/cluster.tf | 12 ++++++++---- autogen/dns.tf | 16 ++++++++-------- autogen/main.tf | 4 ++-- autogen/masq.tf | 4 ++-- autogen/outputs.tf | 4 ++-- cluster.tf | 16 ++++++++-------- dns.tf | 16 ++++++++-------- main.tf | 4 ++-- masq.tf | 4 ++-- modules/beta-private-cluster/cluster.tf | 16 ++++++++-------- modules/beta-private-cluster/dns.tf | 16 ++++++++-------- modules/beta-private-cluster/main.tf | 4 ++-- modules/beta-private-cluster/masq.tf | 4 ++-- modules/beta-private-cluster/outputs.tf | 4 ++-- modules/beta-public-cluster/cluster.tf | 16 ++++++++-------- modules/beta-public-cluster/dns.tf | 16 ++++++++-------- modules/beta-public-cluster/main.tf | 4 ++-- modules/beta-public-cluster/masq.tf | 4 ++-- modules/beta-public-cluster/outputs.tf | 4 ++-- modules/private-cluster/cluster.tf | 16 ++++++++-------- modules/private-cluster/dns.tf | 16 ++++++++-------- modules/private-cluster/main.tf | 4 ++-- modules/private-cluster/masq.tf | 4 ++-- modules/private-cluster/outputs.tf | 4 ++-- outputs.tf | 4 ++-- 25 files changed, 110 insertions(+), 106 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 45f6af0fb6..dfa417d880 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -271,8 +271,12 @@ resource "random_id" "name" { ) } -{% for create_before_destroy_value in ["true", "false"] %} -resource "google_container_node_pool" "pools{{ loop.index0 }}" { +{% for create_before_destroy_value in ["false", "true"] %} +{% if create_before_destroy_value == "false" %} +resource 
"google_container_node_pool" "pools" { +{% else %} +resource "google_container_node_pool" "pools_lifecycle_variant" { +{% endif %} {% if beta_cluster %} provider = google-beta {% else %} @@ -423,7 +427,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/dns.tf b/autogen/dns.tf index 92a19d1a7d..106fad4b70 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -29,8 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -59,8 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -87,8 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -118,7 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/main.tf b/autogen/main.tf index 45d0d2a3f0..ea20c3baaf 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -112,8 +112,8 @@ 
locals { # /BETA features {% endif %} - cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/autogen/masq.tf b/autogen/masq.tf index a73a7cbfea..680a778179 100644 --- a/autogen/masq.tf +++ b/autogen/masq.tf @@ -43,7 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/outputs.tf b/autogen/outputs.tf index f5240baa46..8d0ddb9486 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -53,8 +53,8 @@ output "endpoint" { * to be up. */ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/cluster.tf b/cluster.tf index 655464de47..9b5d24d3a6 100644 --- a/cluster.tf +++ b/cluster.tf @@ -176,9 +176,9 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools0" { +resource "google_container_node_pool" "pools" { provider = google - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + count = var.node_pools_create_before_destroy ? 
0 : length(var.node_pools) name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -269,7 +269,7 @@ resource "google_container_node_pool" "pools0" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = true + create_before_destroy = false } timeouts { @@ -279,9 +279,9 @@ resource "google_container_node_pool" "pools0" { } } -resource "google_container_node_pool" "pools1" { +resource "google_container_node_pool" "pools_lifecycle_variant" { provider = google - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -372,7 +372,7 @@ resource "google_container_node_pool" "pools1" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = false + create_before_destroy = true } timeouts { @@ -395,7 +395,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/dns.tf b/dns.tf index 089b7fb99c..fbd840285b 100644 --- a/dns.tf +++ b/dns.tf @@ -29,8 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -59,8 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - 
google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -87,8 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -118,7 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/main.tf b/main.tf index 6a98b9fdae..c01f2a6f81 100644 --- a/main.tf +++ b/main.tf @@ -81,8 +81,8 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/masq.tf b/masq.tf index b5c40bef4f..2f351c8c2e 100644 --- a/masq.tf +++ b/masq.tf @@ -43,7 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - 
google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 56d57146b6..d8ddb3ce21 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -257,9 +257,9 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools0" { +resource "google_container_node_pool" "pools" { provider = google-beta - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -370,7 +370,7 @@ resource "google_container_node_pool" "pools0" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = true + create_before_destroy = false } timeouts { @@ -380,9 +380,9 @@ resource "google_container_node_pool" "pools0" { } } -resource "google_container_node_pool" "pools1" { +resource "google_container_node_pool" "pools_lifecycle_variant" { provider = google-beta - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -493,7 +493,7 @@ resource "google_container_node_pool" "pools1" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = false + create_before_destroy = true } timeouts { @@ -516,7 +516,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index 089b7fb99c..fbd840285b 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -29,8 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -59,8 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -87,8 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -118,7 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + 
google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index db40ea6147..2e70e18f65 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -100,8 +100,8 @@ locals { # /BETA features - cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-private-cluster/masq.tf b/modules/beta-private-cluster/masq.tf index b5c40bef4f..2f351c8c2e 100644 --- a/modules/beta-private-cluster/masq.tf +++ b/modules/beta-private-cluster/masq.tf @@ -43,7 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index 7c2984c966..915b9cd060 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -53,8 +53,8 @@ output "endpoint" { * to be up. 
*/ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index ae125668c8..2ff4321b13 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -252,9 +252,9 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools0" { +resource "google_container_node_pool" "pools" { provider = google-beta - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -365,7 +365,7 @@ resource "google_container_node_pool" "pools0" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = true + create_before_destroy = false } timeouts { @@ -375,9 +375,9 @@ resource "google_container_node_pool" "pools0" { } } -resource "google_container_node_pool" "pools1" { +resource "google_container_node_pool" "pools_lifecycle_variant" { provider = google-beta - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -488,7 +488,7 @@ resource "google_container_node_pool" "pools1" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = false + create_before_destroy = true } timeouts { @@ -511,7 +511,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index 089b7fb99c..fbd840285b 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -29,8 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -59,8 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -87,8 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -118,7 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + 
google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index 0d0b4c2986..f6e39e549f 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -100,8 +100,8 @@ locals { # /BETA features - cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-public-cluster/masq.tf b/modules/beta-public-cluster/masq.tf index b5c40bef4f..2f351c8c2e 100644 --- a/modules/beta-public-cluster/masq.tf +++ b/modules/beta-public-cluster/masq.tf @@ -43,7 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index 7c2984c966..915b9cd060 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -53,8 +53,8 @@ output "endpoint" { * to be up. 
*/ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 3a95f2eaba..f79151def0 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -181,9 +181,9 @@ resource "random_id" "name" { ) } -resource "google_container_node_pool" "pools0" { +resource "google_container_node_pool" "pools" { provider = google - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -274,7 +274,7 @@ resource "google_container_node_pool" "pools0" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = true + create_before_destroy = false } timeouts { @@ -284,9 +284,9 @@ resource "google_container_node_pool" "pools0" { } } -resource "google_container_node_pool" "pools1" { +resource "google_container_node_pool" "pools_lifecycle_variant" { provider = google - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location @@ -377,7 +377,7 @@ resource "google_container_node_pool" "pools1" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = false + create_before_destroy = true } timeouts { @@ -400,7 +400,7 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index 089b7fb99c..fbd840285b 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -29,8 +29,8 @@ resource "null_resource" "delete_default_kube_dns_configmap" { depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -59,8 +59,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -87,8 +87,8 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } @@ -118,7 +118,7 @@ EOF null_resource.delete_default_kube_dns_configmap, data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + 
google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index a646f036cb..f732fd3ff3 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -81,8 +81,8 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - cluster_output_node_pools_names = concat(google_container_node_pool.pools0.*.name, google_container_node_pool.pools1.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools0.*.version, google_container_node_pool.pools1.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/private-cluster/masq.tf b/modules/private-cluster/masq.tf index b5c40bef4f..2f351c8c2e 100644 --- a/modules/private-cluster/masq.tf +++ b/modules/private-cluster/masq.tf @@ -43,7 +43,7 @@ EOF depends_on = [ data.google_client_config.default, google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/outputs.tf b/modules/private-cluster/outputs.tf index a997ed3458..53350b0754 100644 --- a/modules/private-cluster/outputs.tf +++ b/modules/private-cluster/outputs.tf @@ -53,8 +53,8 @@ output "endpoint" { * to be up. 
*/ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/outputs.tf b/outputs.tf index a997ed3458..53350b0754 100644 --- a/outputs.tf +++ b/outputs.tf @@ -53,8 +53,8 @@ output "endpoint" { * to be up. */ google_container_cluster.primary, - google_container_node_pool.pools0, - google_container_node_pool.pools1, + google_container_node_pool.pools, + google_container_node_pool.pools_lifecycle_variant, ] } From 445f1e3902169850800331d8165cb07dd304e070 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Sat, 7 Sep 2019 12:51:54 -0700 Subject: [PATCH 10/82] enable random_id names when create_before_destroy is desired --- autogen/cluster.tf | 2 +- cluster.tf | 2 +- modules/beta-private-cluster/cluster.tf | 2 +- modules/beta-public-cluster/cluster.tf | 2 +- modules/private-cluster/cluster.tf | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index dfa417d880..afc03efdd5 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -233,7 +233,7 @@ locals { } resource "random_id" "name" { - count = length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) keepers = merge( diff --git a/cluster.tf b/cluster.tf index 9b5d24d3a6..7b5fea5316 100644 --- a/cluster.tf +++ b/cluster.tf @@ -138,7 +138,7 @@ locals { } resource "random_id" "name" { - count = length(var.node_pools) + count = var.node_pools_create_before_destroy ? 
length(var.node_pools) : 0 byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) keepers = merge( diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index d8ddb3ce21..848a49a323 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -219,7 +219,7 @@ locals { } resource "random_id" "name" { - count = length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) keepers = merge( diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 2ff4321b13..3775f1c251 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -214,7 +214,7 @@ locals { } resource "random_id" "name" { - count = length(var.node_pools) + count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) keepers = merge( diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index f79151def0..e2a3a99342 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -143,7 +143,7 @@ locals { } resource "random_id" "name" { - count = length(var.node_pools) + count = var.node_pools_create_before_destroy ? 
length(var.node_pools) : 0 byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) keepers = merge( From 3a9533fe53b3dbe875c03b03d984d2eafda0584f Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Mon, 9 Sep 2019 13:22:33 -0700 Subject: [PATCH 11/82] if a metadata_all tag is moved to a specific node pool, it should not recreate that resource --- autogen/cluster.tf | 33 +++++++++++++------------ cluster.tf | 33 +++++++++++++------------ modules/beta-private-cluster/cluster.tf | 33 +++++++++++++------------ modules/beta-public-cluster/cluster.tf | 33 +++++++++++++------------ modules/private-cluster/cluster.tf | 33 +++++++++++++------------ 5 files changed, 85 insertions(+), 80 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index afc03efdd5..af65747aef 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -242,31 +242,32 @@ resource "random_id" "name" { [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] ), { - labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) - }, - { - labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) - }, - { - metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + labels = join(",", + keys(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels["all"]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) }, { - metadata_node_pool = join(",", + metadata = join(",", + keys(var.node_pools_metadata["all"]), keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata["all"]), values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) ) }, { - oauth_scopes_all = join(",", 
var.node_pools_oauth_scopes["all"]) - }, - { - oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) - }, - { - tags_all = join(",", var.node_pools_tags["all"]) + oauth_scopes = join(",", + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) }, { - tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + tags = join(",", + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) } ) } diff --git a/cluster.tf b/cluster.tf index 7b5fea5316..3701fe0a22 100644 --- a/cluster.tf +++ b/cluster.tf @@ -147,31 +147,32 @@ resource "random_id" "name" { [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] ), { - labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) - }, - { - labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) - }, - { - metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + labels = join(",", + keys(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels["all"]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) }, { - metadata_node_pool = join(",", + metadata = join(",", + keys(var.node_pools_metadata["all"]), keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata["all"]), values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) ) }, { - oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) - }, - { - oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) - }, - { - tags_all = join(",", var.node_pools_tags["all"]) + oauth_scopes = 
join(",", + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) }, { - tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + tags = join(",", + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) } ) } diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 848a49a323..502649d3e2 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -228,31 +228,32 @@ resource "random_id" "name" { [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] ), { - labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) - }, - { - labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) - }, - { - metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + labels = join(",", + keys(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels["all"]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) }, { - metadata_node_pool = join(",", + metadata = join(",", + keys(var.node_pools_metadata["all"]), keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata["all"]), values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) ) }, { - oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) - }, - { - oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) - }, - { - tags_all = join(",", var.node_pools_tags["all"]) + oauth_scopes = join(",", + var.node_pools_oauth_scopes["all"], + 
var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) }, { - tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + tags = join(",", + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) } ) } diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 3775f1c251..bde673a896 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -223,31 +223,32 @@ resource "random_id" "name" { [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] ), { - labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) - }, - { - labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) - }, - { - metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + labels = join(",", + keys(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels["all"]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) }, { - metadata_node_pool = join(",", + metadata = join(",", + keys(var.node_pools_metadata["all"]), keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata["all"]), values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) ) }, { - oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) - }, - { - oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) - }, - { - tags_all = join(",", var.node_pools_tags["all"]) + oauth_scopes = join(",", + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) }, { - tags_node_pool = join(",", 
var.node_pools_tags[var.node_pools[count.index]["name"]]) + tags = join(",", + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) } ) } diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index e2a3a99342..0dcac5e535 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -152,31 +152,32 @@ resource "random_id" "name" { [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] ), { - labels_all = join(",", keys(var.node_pools_labels["all"]), values(var.node_pools_labels["all"])) - }, - { - labels_node_pool = join(",", keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), values(var.node_pools_labels[var.node_pools[count.index]["name"]])) - }, - { - metadata_all = join(",", keys(var.node_pools_metadata["all"]), values(var.node_pools_metadata["all"])) + labels = join(",", + keys(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels["all"]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) }, { - metadata_node_pool = join(",", + metadata = join(",", + keys(var.node_pools_metadata["all"]), keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata["all"]), values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) ) }, { - oauth_scopes_all = join(",", var.node_pools_oauth_scopes["all"]) - }, - { - oauth_scopes_node_pool = join(",", var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]) - }, - { - tags_all = join(",", var.node_pools_tags["all"]) + oauth_scopes = join(",", + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) }, { - tags_node_pool = join(",", var.node_pools_tags[var.node_pools[count.index]["name"]]) + tags = join(",", + var.node_pools_tags["all"], + 
var.node_pools_tags[var.node_pools[count.index]["name"]] + ) } ) } From 1f95dfb7bb6ef762d9d481953042c9ec87437c0f Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Mon, 9 Sep 2019 16:22:41 -0700 Subject: [PATCH 12/82] Create new templated submodule for create_before_destroy lifecycle variation --- README.md | 1 - autogen/cluster.tf | 23 +- autogen/dns.tf | 4 - autogen/main.tf | 4 +- autogen/masq.tf | 1 - autogen/outputs.tf | 1 - autogen/variables.tf | 6 - cluster.tf | 164 +------ dns.tf | 4 - helpers/generate_modules/generate_modules.py | 5 + main.tf | 4 +- masq.tf | 1 - modules/beta-private-cluster/README.md | 1 - modules/beta-private-cluster/cluster.tf | 184 +------- modules/beta-private-cluster/dns.tf | 4 - modules/beta-private-cluster/main.tf | 4 +- modules/beta-private-cluster/masq.tf | 1 - modules/beta-private-cluster/outputs.tf | 1 - modules/beta-private-cluster/variables.tf | 6 - modules/beta-public-cluster/README.md | 1 - modules/beta-public-cluster/cluster.tf | 184 +------- modules/beta-public-cluster/dns.tf | 4 - modules/beta-public-cluster/main.tf | 4 +- modules/beta-public-cluster/masq.tf | 1 - modules/beta-public-cluster/outputs.tf | 1 - modules/beta-public-cluster/variables.tf | 6 - .../README.md | 400 ++++++++++++++++++ .../private-cluster-lifecycle-variant/auth.tf | 34 ++ .../cluster.tf | 303 +++++++++++++ .../private-cluster-lifecycle-variant/dns.tf | 120 ++++++ .../private-cluster-lifecycle-variant/main.tf | 124 ++++++ .../private-cluster-lifecycle-variant/masq.tf | 48 +++ .../networks.tf | 32 ++ .../outputs.tf | 125 ++++++ .../private-cluster-lifecycle-variant/sa.tf | 71 ++++ .../scripts/delete-default-resource.sh | 41 ++ .../scripts/kubectl_wrapper.sh | 53 +++ .../scripts/wait-for-cluster.sh | 33 ++ .../variables.tf | 318 ++++++++++++++ .../versions.tf | 19 + modules/private-cluster/README.md | 1 - modules/private-cluster/cluster.tf | 164 +------ modules/private-cluster/dns.tf | 4 - modules/private-cluster/main.tf | 4 +- 
modules/private-cluster/masq.tf | 1 - modules/private-cluster/outputs.tf | 1 - modules/private-cluster/variables.tf | 6 - outputs.tf | 1 - variables.tf | 6 - 49 files changed, 1758 insertions(+), 771 deletions(-) create mode 100644 modules/private-cluster-lifecycle-variant/README.md create mode 100644 modules/private-cluster-lifecycle-variant/auth.tf create mode 100644 modules/private-cluster-lifecycle-variant/cluster.tf create mode 100644 modules/private-cluster-lifecycle-variant/dns.tf create mode 100644 modules/private-cluster-lifecycle-variant/main.tf create mode 100644 modules/private-cluster-lifecycle-variant/masq.tf create mode 100644 modules/private-cluster-lifecycle-variant/networks.tf create mode 100644 modules/private-cluster-lifecycle-variant/outputs.tf create mode 100644 modules/private-cluster-lifecycle-variant/sa.tf create mode 100755 modules/private-cluster-lifecycle-variant/scripts/delete-default-resource.sh create mode 100755 modules/private-cluster-lifecycle-variant/scripts/kubectl_wrapper.sh create mode 100755 modules/private-cluster-lifecycle-variant/scripts/wait-for-cluster.sh create mode 100644 modules/private-cluster-lifecycle-variant/variables.tf create mode 100644 modules/private-cluster-lifecycle-variant/versions.tf diff --git a/README.md b/README.md index 40b3f02e86..923d3f7a09 100644 --- a/README.md +++ b/README.md @@ -158,7 +158,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | -| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/autogen/cluster.tf b/autogen/cluster.tf index af65747aef..69815c8ee4 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -219,6 +219,7 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +{% if lifecycle_variant %} locals { force_node_pool_recreation_resources = [ "disk_size_gb", @@ -233,7 +234,7 @@ locals { } resource "random_id" "name" { - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + count = length(var.node_pools) byte_length = 2 prefix = format("%s-", lookup(var.node_pools[count.index], "name")) keepers = merge( @@ -272,23 +273,19 @@ resource "random_id" "name" { ) } -{% for create_before_destroy_value in ["false", "true"] %} -{% if create_before_destroy_value == "false" %} -resource "google_container_node_pool" "pools" { -{% else %} -resource "google_container_node_pool" "pools_lifecycle_variant" { {% endif %} +resource "google_container_node_pool" "pools" { {% if beta_cluster %} provider = google-beta {% else %} provider = google {% endif %} - {% if create_before_destroy_value == "true" %} - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 + count = length(var.node_pools) + {% if lifecycle_variant %} + name = random_id.name.*.hex[count.index] {% else %} - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) + name = lookup(var.node_pools[count.index], "name") {% endif %} - name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -404,7 +401,9 @@ resource "google_container_node_pool" "pools_lifecycle_variant" { lifecycle { ignore_changes = [initial_node_count] - create_before_destroy = {{ create_before_destroy_value }} + {% if lifecycle_variant %} + create_before_destroy = true + {% endif %} } timeouts { @@ -414,7 +413,6 @@ resource "google_container_node_pool" "pools_lifecycle_variant" { } } -{% endfor %} resource "null_resource" "wait_for_cluster" { provisioner "local-exec" { @@ -429,6 +427,5 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/dns.tf b/autogen/dns.tf index 106fad4b70..d9d4a35395 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -30,7 +30,6 @@ resource "null_resource" "delete_default_kube_dns_configmap" { data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -60,7 +59,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -88,7 +86,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -119,6 +116,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/main.tf b/autogen/main.tf index ea20c3baaf..0f5b0edc06 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -112,8 +112,8 @@ locals { # /BETA features {% endif %} - cluster_output_node_pools_names = 
concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/autogen/masq.tf b/autogen/masq.tf index 680a778179..6deab757c3 100644 --- a/autogen/masq.tf +++ b/autogen/masq.tf @@ -44,6 +44,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/outputs.tf b/autogen/outputs.tf index 8d0ddb9486..ff8eab1bef 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -54,7 +54,6 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/autogen/variables.tf b/autogen/variables.tf index 2877bfbd4c..16c2955ced 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -210,12 +210,6 @@ variable "node_pools_oauth_scopes" { } } -variable "node_pools_create_before_destroy" { - type = bool - description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" - default = false -} - variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/cluster.tf b/cluster.tf index 3701fe0a22..c82253c7cd 100644 --- a/cluster.tf +++ b/cluster.tf @@ -124,166 +124,10 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ -locals { - force_node_pool_recreation_resources = [ - "disk_size_gb", - "disk_type", - "accelerator_count", - "accelerator_type", - "local_ssd_count", - "machine_type", - "preemptible", - "service_account", - ] -} - -resource "random_id" "name" { - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 - byte_length = 2 - prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - keepers = merge( - zipmap( - local.force_node_pool_recreation_resources, - [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] - ), - { - labels = join(",", - keys(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels["all"]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) - ) - }, - { - metadata = join(",", - keys(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata["all"]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) - ) - }, - { - oauth_scopes = join(",", - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] - ) - }, - { - tags = join(",", - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] - ) - } - ) -} - resource "google_container_node_pool" "pools" { provider 
= google - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) - name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") - project = var.project_id - location = local.location - cluster = google_container_cluster.primary.name - version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( - var.node_pools[count.index], - "version", - local.node_version, - ) - initial_node_count = lookup( - var.node_pools[count.index], - "initial_node_count", - lookup(var.node_pools[count.index], "min_count", 1), - ) - - node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) - - dynamic "autoscaling" { - for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] - content { - min_node_count = lookup(autoscaling.value, "min_count", 1) - max_node_count = lookup(autoscaling.value, "max_count", 100) - } - } - - management { - auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) - auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) - } - - node_config { - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], - ) - metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], - { - "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints - }, - ) - tags = concat( - ["gke-${var.name}"], - 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], - ) - - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - service_account = lookup( - var.node_pools[count.index], - "service_account", - local.service_account, - ) - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - - oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], - ) - - guest_accelerator = [ - for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ - type = lookup(var.node_pools[count.index], "accelerator_type", "") - count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] : { - type = guest_accelerator["type"] - count = guest_accelerator["count"] - } - ] - } - - lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = false - } - - timeouts { - create = "30m" - update = "30m" - delete = "30m" - } -} - -resource "google_container_node_pool" "pools_lifecycle_variant" { - provider = google - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 - name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = length(var.node_pools) + name = lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -372,8 +216,7 @@ resource "google_container_node_pool" "pools_lifecycle_variant" { } lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = true + ignore_changes = [initial_node_count] } timeouts { @@ -397,6 +240,5 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/dns.tf b/dns.tf index fbd840285b..b240a23e65 100644 --- a/dns.tf +++ b/dns.tf @@ -30,7 +30,6 @@ resource "null_resource" "delete_default_kube_dns_configmap" { data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -60,7 +59,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -88,7 +86,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -119,6 +116,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index c235e7ad65..1ae841cde1 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -54,6 +54,11 @@ def template_options(self, base): 'private_cluster': True, 'beta_cluster': True, }), + Module("./modules/private-cluster-lifecycle-variant", { + 
'module_path': '//modules/private-cluster-lifecycle-variant', + 'private_cluster': True, + 'lifecycle_variant': True, + }), Module("./modules/beta-public-cluster", { 'module_path': '//modules/beta-public-cluster', 'private_cluster': False, diff --git a/main.tf b/main.tf index c01f2a6f81..b63d60f884 100644 --- a/main.tf +++ b/main.tf @@ -81,8 +81,8 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/masq.tf b/masq.tf index 2f351c8c2e..b6e411fc42 100644 --- a/masq.tf +++ b/masq.tf @@ -44,6 +44,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index e8bb4c72a3..dbc69c4d14 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -179,7 +179,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | | node\_pools | List of maps 
containing node pools | list(map(string)) | `` | no | -| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 502649d3e2..6469f2f1f2 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -205,186 +205,10 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ -locals { - force_node_pool_recreation_resources = [ - "disk_size_gb", - "disk_type", - "accelerator_count", - "accelerator_type", - "local_ssd_count", - "machine_type", - "preemptible", - "service_account", - ] -} - -resource "random_id" "name" { - count = var.node_pools_create_before_destroy ? 
length(var.node_pools) : 0 - byte_length = 2 - prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - keepers = merge( - zipmap( - local.force_node_pool_recreation_resources, - [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] - ), - { - labels = join(",", - keys(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels["all"]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) - ) - }, - { - metadata = join(",", - keys(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata["all"]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) - ) - }, - { - oauth_scopes = join(",", - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] - ) - }, - { - tags = join(",", - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] - ) - } - ) -} - resource "google_container_node_pool" "pools" { provider = google-beta - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) - name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") - project = var.project_id - location = local.location - cluster = google_container_cluster.primary.name - version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( - var.node_pools[count.index], - "version", - local.node_version, - ) - initial_node_count = lookup( - var.node_pools[count.index], - "initial_node_count", - lookup(var.node_pools[count.index], "min_count", 1), - ) - max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) - - node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? 
null : lookup(var.node_pools[count.index], "min_count", 1) - - dynamic "autoscaling" { - for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] - content { - min_node_count = lookup(autoscaling.value, "min_count", 1) - max_node_count = lookup(autoscaling.value, "max_count", 100) - } - } - - management { - auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) - auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) - } - - node_config { - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], - ) - metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], - { - "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints - }, - ) - dynamic "taint" { - for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], - ) - content { - effect = taint.value.effect - key = taint.value.key - value = taint.value.value - } - } - tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], - ) - - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - service_account = lookup( - var.node_pools[count.index], - "service_account", - local.service_account, - ) - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - - 
oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], - ) - - guest_accelerator = [ - for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ - type = lookup(var.node_pools[count.index], "accelerator_type", "") - count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] : { - type = guest_accelerator["type"] - count = guest_accelerator["count"] - } - ] - - dynamic "workload_metadata_config" { - for_each = local.cluster_node_metadata_config - - content { - node_metadata = workload_metadata_config.value.node_metadata - } - } - } - - lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = false - } - - timeouts { - create = "30m" - update = "30m" - delete = "30m" - } -} - -resource "google_container_node_pool" "pools_lifecycle_variant" { - provider = google-beta - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 - name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = length(var.node_pools) + name = lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -493,8 +317,7 @@ resource "google_container_node_pool" "pools_lifecycle_variant" { } lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = true + ignore_changes = [initial_node_count] } timeouts { @@ -518,6 +341,5 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index fbd840285b..b240a23e65 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -30,7 +30,6 @@ resource "null_resource" "delete_default_kube_dns_configmap" { data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -60,7 +59,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -88,7 +86,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -119,6 +116,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 2e70e18f65..1199f2dfa6 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -100,8 +100,8 @@ locals { # /BETA features - cluster_output_node_pools_names = 
concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-private-cluster/masq.tf b/modules/beta-private-cluster/masq.tf index 2f351c8c2e..b6e411fc42 100644 --- a/modules/beta-private-cluster/masq.tf +++ b/modules/beta-private-cluster/masq.tf @@ -44,6 +44,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index 915b9cd060..4153960069 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -54,7 +54,6 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index d57bba8830..975fe7a173 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -208,12 +208,6 @@ variable "node_pools_oauth_scopes" { } } -variable "node_pools_create_before_destroy" { - type = bool - description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" - default = false -} - variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 09aa7beb54..db41d10821 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -170,7 +170,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | -| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index bde673a896..5d7bf9d158 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -200,186 +200,10 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ -locals { - force_node_pool_recreation_resources = [ - "disk_size_gb", - "disk_type", - "accelerator_count", - "accelerator_type", - "local_ssd_count", - 
"machine_type", - "preemptible", - "service_account", - ] -} - -resource "random_id" "name" { - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 - byte_length = 2 - prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - keepers = merge( - zipmap( - local.force_node_pool_recreation_resources, - [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] - ), - { - labels = join(",", - keys(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels["all"]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) - ) - }, - { - metadata = join(",", - keys(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata["all"]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) - ) - }, - { - oauth_scopes = join(",", - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] - ) - }, - { - tags = join(",", - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] - ) - } - ) -} - resource "google_container_node_pool" "pools" { provider = google-beta - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) - name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") - project = var.project_id - location = local.location - cluster = google_container_cluster.primary.name - version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( - var.node_pools[count.index], - "version", - local.node_version, - ) - initial_node_count = lookup( - var.node_pools[count.index], - "initial_node_count", - lookup(var.node_pools[count.index], "min_count", 1), - ) - max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) - - node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) - - dynamic "autoscaling" { - for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] - content { - min_node_count = lookup(autoscaling.value, "min_count", 1) - max_node_count = lookup(autoscaling.value, "max_count", 100) - } - } - - management { - auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) - auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) - } - - node_config { - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], - ) - metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], - { - "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints - }, - ) - dynamic "taint" { - for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], - ) - content { - effect = taint.value.effect - key = taint.value.key - value = taint.value.value - } - } - tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], - var.node_pools_tags["all"], - 
var.node_pools_tags[var.node_pools[count.index]["name"]], - ) - - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - service_account = lookup( - var.node_pools[count.index], - "service_account", - local.service_account, - ) - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - - oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], - ) - - guest_accelerator = [ - for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ - type = lookup(var.node_pools[count.index], "accelerator_type", "") - count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] : { - type = guest_accelerator["type"] - count = guest_accelerator["count"] - } - ] - - dynamic "workload_metadata_config" { - for_each = local.cluster_node_metadata_config - - content { - node_metadata = workload_metadata_config.value.node_metadata - } - } - } - - lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = false - } - - timeouts { - create = "30m" - update = "30m" - delete = "30m" - } -} - -resource "google_container_node_pool" "pools_lifecycle_variant" { - provider = google-beta - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 - name = var.node_pools_create_before_destroy ? 
random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = length(var.node_pools) + name = lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -488,8 +312,7 @@ resource "google_container_node_pool" "pools_lifecycle_variant" { } lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = true + ignore_changes = [initial_node_count] } timeouts { @@ -513,6 +336,5 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index fbd840285b..b240a23e65 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -30,7 +30,6 @@ resource "null_resource" "delete_default_kube_dns_configmap" { data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -60,7 +59,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -88,7 +86,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -119,6 +116,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index f6e39e549f..e4e7548a3a 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -100,8 +100,8 @@ locals { # /BETA features - cluster_output_node_pools_names = 
concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-public-cluster/masq.tf b/modules/beta-public-cluster/masq.tf index 2f351c8c2e..b6e411fc42 100644 --- a/modules/beta-public-cluster/masq.tf +++ b/modules/beta-public-cluster/masq.tf @@ -44,6 +44,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index 915b9cd060..4153960069 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -54,7 +54,6 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 53d98b7e02..850f38ea83 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -208,12 +208,6 @@ variable "node_pools_oauth_scopes" { } } -variable "node_pools_create_before_destroy" { - type = bool - description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" - default = false -} - variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/modules/private-cluster-lifecycle-variant/README.md b/modules/private-cluster-lifecycle-variant/README.md new file mode 100644 index 0000000000..779b021aac --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/README.md @@ -0,0 +1,400 @@ +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. This particular submodule creates a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters) +The resources/services/activations/deletions that this module will create/trigger are: +- Create a GKE cluster with the provided addons +- Create GKE Node Pool(s) with provided configuration and attach to cluster +- Replace the default kube-dns configmap if `stub_domains` are provided +- Activate network policy if `network_policy` is true +- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true + +Sub modules are provided for creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. + +**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. + + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. 
+ +## Usage +There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: + +```hcl +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-lifecycle-variant" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.0.0.0/28" + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + preemptible = false + initial_node_count = 80 + }, + ] + + node_pools_oauth_scopes = { + all = [] + + default-node-pool = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure + +## 
Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliased IP addresses. 
IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. 
To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. 
VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) 
| bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in 
`node_pools`. | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + + + +## Requirements + +Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: + +1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. +2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). +3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. +4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. + +The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. + +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP][terraform-provider-google] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this 
module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. + +## Templating + +To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. + +The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. + +Note: The correct sequence to update the repo using autogen functionality is to run +`make generate && make generate_docs`. This will create the various Terraform files, and then +generate the Terraform documentation using `terraform-docs`. + +## Testing + +### Requirements +- [bundler](https://github.com/bundler/bundler) +- [gcloud](https://cloud.google.com/sdk/install) +- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 + +### Autogeneration of documentation from .tf files +Run +``` +make generate_docs +``` + +### Integration test + +Integration tests are run through [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). 
+ +Six test-kitchen instances are defined: + +- `deploy-service` +- `node-pool` +- `shared-vpc` +- `simple-regional` +- `simple-zonal` +- `stub-domains` + +The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. + +#### Setup + +1. Configure the [test fixtures](#test-configuration) +2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. + - Requires the [permissions to run the module](#configure-a-service-account) + - Requires `roles/compute.networkAdmin` to create the test suite's networks + - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested +3. Build the Docker container for testing: + + ``` + make docker_build_kitchen_terraform + ``` +4. Run the testing container in interactive mode: + + ``` + make docker_run + ``` + + The module root directory will be loaded into the Docker container at `/cft/workdir/`. +5. Run kitchen-terraform to test the infrastructure: + + 1. `kitchen create` creates Terraform state and downloads modules, if applicable. + 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. + 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). + 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. + 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. + +Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. 
+ +If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. + +When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: + +``` +export COMPUTE_ENGINE_SERVICE_ACCOUNT="" +export PROJECT_ID="" +export REGION="" +export ZONES='[""]' +export SERVICE_ACCOUNT_JSON="$(cat "")" +export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" +export GOOGLE_APPLICATION_CREDENTIALS="" +``` + +#### Test configuration + +Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. +For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. +Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. + +Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. + +### Autogeneration of documentation from .tf files +Run +``` +make generate_docs +``` + +### Linting +The makefile in this project will lint or sometimes just format any shell, +Python, golang, Terraform, or Dockerfiles. The linters will only be run if +the makefile finds files with the appropriate file extension. + +All of the linter checks are in the default make target, so you just have to +run + +``` +make -s +``` + +The -s is for 'silent'. 
Successful output looks like this + +``` +Running shellcheck +Running flake8 +Running go fmt and go vet +Running terraform validate +Running hadolint on Dockerfiles +Checking for required files +Testing the validity of the header check +.. +---------------------------------------------------------------------- +Ran 2 tests in 0.026s + +OK +Checking file headers +The following lines have trailing whitespace +``` + +The linters +are as follows: +* Shell - shellcheck. Can be found in homebrew +* Python - flake8. Can be installed with 'pip install flake8' +* Golang - gofmt. gofmt comes with the standard golang installation. golang +is a compiled language so there is no standard linter. +* Terraform - terraform has a built-in linter in the 'terraform validate' +command. +* Dockerfiles - hadolint. Can be found in homebrew + +[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster-lifecycle-variant/auth.tf b/modules/private-cluster-lifecycle-variant/auth.tf new file mode 100644 index 0000000000..48e7cc6a5f --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/auth.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" { + provider = google +} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) +} diff --git a/modules/private-cluster-lifecycle-variant/cluster.tf b/modules/private-cluster-lifecycle-variant/cluster.tf new file mode 100644 index 0000000000..05007e3b13 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/cluster.tf @@ -0,0 +1,303 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create Container Cluster + *****************************************/ +resource "google_container_cluster" "primary" { + provider = google + + name = var.name + description = var.description + project = var.project_id + resource_labels = var.cluster_resource_labels + + location = local.location + node_locations = local.node_locations + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.master_version + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } + + master_auth { + username = var.basic_auth_username + password = var.basic_auth_password + + client_certificate_config { + issue_client_certificate = var.issue_client_certificate + } + } + + addons_config { + http_load_balancing { + disabled = ! var.http_load_balancing + } + + horizontal_pod_autoscaling { + disabled = ! var.horizontal_pod_autoscaling + } + + kubernetes_dashboard { + disabled = ! var.kubernetes_dashboard + } + + network_policy_config { + disabled = ! 
var.network_policy + } + } + + ip_allocation_policy { + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services + } + + maintenance_policy { + daily_maintenance_window { + start_time = var.maintenance_start_time + } + } + + lifecycle { + ignore_changes = [node_pool, initial_node_count] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + initial_node_count = var.initial_node_count + + node_config { + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + } + } + + private_cluster_config { + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block + } + + remove_default_node_pool = var.remove_default_node_pool +} + +/****************************************** + Create Container Cluster node pools + *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + keys(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels["all"]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + }, + { + metadata = join(",", + keys(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata["all"]), + 
values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + }, + { + oauth_scopes = join(",", + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + }, + { + tags = join(",", + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + } + ) +} + +resource "google_container_node_pool" "pools" { + provider = google + count = length(var.node_pools) + name = random_id.name.*.hex[count.index] + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? 
[{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "null_resource" "wait_for_cluster" { + + provisioner "local-exec" { + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + provisioner "local-exec" { + when = destroy + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/modules/private-cluster-lifecycle-variant/dns.tf b/modules/private-cluster-lifecycle-variant/dns.tf new file mode 100644 index 0000000000..b240a23e65 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/dns.tf @@ -0,0 +1,120 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Delete default kube-dns configmap + *****************************************/ +resource "null_resource" "delete_default_kube_dns_configmap" { + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + + provisioner "local-exec" { + command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + } + + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} + +/****************************************** + Create kube-dns confimap + *****************************************/ +resource "kubernetes_config_map" "kube-dns" { + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 + + metadata { + name = "kube-dns" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) + cluster_type = var.regional ? "regional" : "zonal" + // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous. + default_auto_upgrade = var.regional ? true : false + + cluster_network_policy = var.network_policy ? 
[{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + + cluster_output_name = google_container_cluster.primary.name + cluster_output_location = google_container_cluster.primary.location + cluster_output_region = google_container_cluster.primary.region + cluster_output_regional_zones = google_container_cluster.primary.node_locations + cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] + cluster_output_zones = local.cluster_output_regional_zones + + cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.endpoint + + cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) + cluster_output_master_version = google_container_cluster.primary.master_version + cluster_output_min_master_version = google_container_cluster.primary.min_master_version + cluster_output_logging_service = google_container_cluster.primary.logging_service + cluster_output_monitoring_service = google_container_cluster.primary.monitoring_service + cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled + cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled + cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled + cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled + + + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + + cluster_master_auth_list_layer1 = local.cluster_output_master_auth + cluster_master_auth_list_layer2 = 
local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] + # cluster locals + cluster_name = local.cluster_output_name + cluster_location = local.cluster_output_location + cluster_region = local.cluster_output_region + cluster_zones = sort(local.cluster_output_zones) + cluster_endpoint = local.cluster_output_endpoint + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_output_master_version + cluster_min_master_version = local.cluster_output_min_master_version + cluster_logging_service = local.cluster_output_logging_service + cluster_monitoring_service = local.cluster_output_monitoring_service + cluster_node_pools_names = local.cluster_output_node_pools_names + cluster_node_pools_versions = local.cluster_output_node_pools_versions + cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled + cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled + cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled +} + +/****************************************** + Get available container engine versions + *****************************************/ +data "google_container_engine_versions" "region" { + location = local.location + project = var.project_id +} + +data "google_container_engine_versions" "zone" { + // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error + // + // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. + // + location = local.zone_count == 0 ? 
data.google_compute_zones.available.names[0] : var.zones[0] + project = var.project_id +} diff --git a/modules/private-cluster-lifecycle-variant/masq.tf b/modules/private-cluster-lifecycle-variant/masq.tf new file mode 100644 index 0000000000..b6e411fc42 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/masq.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create ip-masq-agent confimap + *****************************************/ +resource "kubernetes_config_map" "ip-masq-agent" { + count = var.configure_ip_masq ? 1 : 0 + + metadata { + name = "ip-masq-agent" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + config = <&2 echo "3 arguments expected. Exiting." 
+ exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/modules/private-cluster-lifecycle-variant/scripts/kubectl_wrapper.sh b/modules/private-cluster-lifecycle-variant/scripts/kubectl_wrapper.sh new file mode 100755 index 0000000000..e92300bcb5 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/scripts/kubectl_wrapper.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 3 ]; then + >&2 echo "Not all expected arguments set." 
+ exit 1 +fi + +HOST=$1 +TOKEN=$2 +CA_CERTIFICATE=$3 + +shift 3 + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +# shellcheck disable=SC1117 +base64 --help | grep "\--decode" && B64_ARG="--decode" || B64_ARG="-d" +echo "${CA_CERTIFICATE}" | base64 ${B64_ARG} > "${TMPDIR}/ca_certificate" + +kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null +rm -f "${TMPDIR}/ca_certificate" +kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null +kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null +kubectl config use-context kubectl-wrapper 1>/dev/null +kubectl version 1>/dev/null + +"$@" diff --git a/modules/private-cluster-lifecycle-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-lifecycle-variant/scripts/wait-for-cluster.sh new file mode 100755 index 0000000000..6ff3253d58 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/scripts/wait-for-cluster.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e + +PROJECT=$1 +CLUSTER_NAME=$2 +gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" +jq_query=".[] | select(.name==\"$CLUSTER_NAME\") | .status" + +echo "Waiting for cluster $2 in project $1 to reconcile..." + +current_status=$($gcloud_command | jq -r "$jq_query") + +while [[ "${current_status}" == "RECONCILING" ]]; do + printf "." + sleep 5 + current_status=$($gcloud_command | jq -r "$jq_query") +done + +echo "Cluster is ready!" diff --git a/modules/private-cluster-lifecycle-variant/variables.tf b/modules/private-cluster-lifecycle-variant/variables.tf new file mode 100644 index 0000000000..8008e08975 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/variables.tf @@ -0,0 +1,318 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +variable "project_id" { + type = string + description = "The project ID to host the cluster in (required)" +} + +variable "name" { + type = string + description = "The name of the cluster (required)" +} + +variable "description" { + type = string + description = "The description of the cluster" + default = "" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster (zonal cluster if set false. 
WARNING: changing this after cluster creation is destructive!)" + default = true +} + +variable "region" { + type = string + description = "The region to host the cluster in (required)" +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" + default = [] +} + +variable "network" { + type = string + description = "The VPC network to host the cluster in (required)" +} + +variable "network_project_id" { + type = string + description = "The project ID of the shared VPC's host (for shared vpc support)" + default = "" +} + +variable "subnetwork" { + type = string + description = "The subnetwork to host the cluster in (required)" +} + +variable "kubernetes_version" { + type = string + description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." + default = "latest" +} + +variable "node_version" { + type = string + description = "The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation." + default = "" +} + +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." 
+ default = [] +} + +variable "horizontal_pod_autoscaling" { + type = bool + description = "Enable horizontal pod autoscaling addon" + default = true +} + +variable "http_load_balancing" { + type = bool + description = "Enable httpload balancer addon" + default = true +} + +variable "kubernetes_dashboard" { + type = bool + description = "Enable kubernetes dashboard addon" + default = false +} + +variable "network_policy" { + type = bool + description = "Enable network policy addon" + default = false +} + +variable "network_policy_provider" { + type = string + description = "The network policy provider." + default = "CALICO" +} + +variable "maintenance_start_time" { + type = string + description = "Time window specified for daily maintenance operations in RFC3339 format" + default = "05:00" +} + +variable "ip_range_pods" { + type = string + description = "The _name_ of the secondary subnet ip range to use for pods" +} + +variable "ip_range_services" { + type = string + description = "The _name_ of the secondary subnet range to use for services" +} + +variable "initial_node_count" { + type = number + description = "The number of nodes to create in this cluster's default node pool." + default = 0 +} + +variable "remove_default_node_pool" { + type = bool + description = "Remove default node pool while setting up the cluster" + default = false +} + +variable "disable_legacy_metadata_endpoints" { + type = bool + description = "Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated." 
+ default = true +} + +variable "node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + + default = [ + { + name = "default-node-pool" + }, + ] +} + +variable "node_pools_labels" { + type = map(map(string)) + description = "Map of maps containing node labels by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_metadata" { + type = map(map(string)) + description = "Map of maps containing node metadata by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_tags" { + type = map(list(string)) + description = "Map of lists containing node network tags by node-pool name" + + default = { + all = [] + default-node-pool = [] + } +} + +variable "node_pools_oauth_scopes" { + type = map(list(string)) + description = "Map of lists containing node oauth scopes by node-pool name" + + default = { + all = ["https://www.googleapis.com/auth/cloud-platform"] + default-node-pool = [] + } +} + +variable "stub_domains" { + type = map(list(string)) + description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" + default = {} +} + +variable "upstream_nameservers" { + type = "list" + description = "If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf" + default = [] +} + +variable "non_masquerade_cidrs" { + type = list(string) + description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading." + default = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] +} + +variable "ip_masq_resync_interval" { + type = string + description = "The interval at which the agent attempts to sync its ConfigMap file from the disk." + default = "60s" +} + +variable "ip_masq_link_local" { + type = bool + description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)." 
+ default = false +} + +variable "configure_ip_masq" { + description = "Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server." + default = false +} + +variable "logging_service" { + type = string + description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none" + default = "logging.googleapis.com" +} + +variable "monitoring_service" { + type = string + description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none" + default = "monitoring.googleapis.com" +} + +variable "create_service_account" { + type = bool + description = "Defines if service account specified to run nodes should be created." + default = true +} + +variable "grant_registry_access" { + type = bool + description = "Grants created cluster-specific service account storage.objectViewer role." + default = false +} + +variable "service_account" { + type = string + description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." + default = "" +} + +variable "basic_auth_username" { + type = string + description = "The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration." + default = "" +} + +variable "basic_auth_password" { + type = string + description = "The password to be used with Basic Authentication." 
+ default = "" +} + +variable "issue_client_certificate" { + type = bool + description = "Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive!" + default = false +} + +variable "cluster_ipv4_cidr" { + default = "" + description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." +} + +variable "cluster_resource_labels" { + type = map(string) + description = "The GCE resource labels (a map of key/value pairs) to be applied to the cluster" + default = {} +} + + +variable "deploy_using_private_endpoint" { + type = bool + description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." + default = false +} + +variable "enable_private_endpoint" { + type = bool + description = "(Beta) Whether the master's internal IP address is used as the cluster endpoint" + default = false +} + +variable "enable_private_nodes" { + type = bool + description = "(Beta) Whether nodes have internal IP addresses only" + default = false +} + +variable "master_ipv4_cidr_block" { + type = string + description = "(Beta) The IP range in CIDR notation to use for the hosted master network" + default = "10.0.0.0/28" +} diff --git a/modules/private-cluster-lifecycle-variant/versions.tf b/modules/private-cluster-lifecycle-variant/versions.tf new file mode 100644 index 0000000000..832ec1df39 --- /dev/null +++ b/modules/private-cluster-lifecycle-variant/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 9110164143..c29d58ee93 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -167,7 +167,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | -| node\_pools\_create\_before\_destroy | Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it | bool | `"false"` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 0dcac5e535..0c25b40c22 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -129,166 +129,10 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ -locals { - force_node_pool_recreation_resources = [ - "disk_size_gb", - "disk_type", - "accelerator_count", - "accelerator_type", - "local_ssd_count", - "machine_type", - "preemptible", - "service_account", - ] -} - -resource "random_id" "name" { - count = var.node_pools_create_before_destroy ? 
length(var.node_pools) : 0 - byte_length = 2 - prefix = format("%s-", lookup(var.node_pools[count.index], "name")) - keepers = merge( - zipmap( - local.force_node_pool_recreation_resources, - [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] - ), - { - labels = join(",", - keys(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels["all"]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) - ) - }, - { - metadata = join(",", - keys(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata["all"]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) - ) - }, - { - oauth_scopes = join(",", - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] - ) - }, - { - tags = join(",", - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] - ) - } - ) -} - resource "google_container_node_pool" "pools" { provider = google - count = var.node_pools_create_before_destroy ? 0 : length(var.node_pools) - name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") - project = var.project_id - location = local.location - cluster = google_container_cluster.primary.name - version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( - var.node_pools[count.index], - "version", - local.node_version, - ) - initial_node_count = lookup( - var.node_pools[count.index], - "initial_node_count", - lookup(var.node_pools[count.index], "min_count", 1), - ) - - node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) - - dynamic "autoscaling" { - for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : [] - content { - min_node_count = lookup(autoscaling.value, "min_count", 1) - max_node_count = lookup(autoscaling.value, "max_count", 100) - } - } - - management { - auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) - auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) - } - - node_config { - image_type = lookup(var.node_pools[count.index], "image_type", "COS") - machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") - labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], - ) - metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], - { - "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints - }, - ) - tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], - ) - - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") - service_account = lookup( - var.node_pools[count.index], - "service_account", - local.service_account, - ) - preemptible = lookup(var.node_pools[count.index], "preemptible", false) - - oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], - ) - - guest_accelerator = [ - for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? 
[{ - type = lookup(var.node_pools[count.index], "accelerator_type", "") - count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] : { - type = guest_accelerator["type"] - count = guest_accelerator["count"] - } - ] - } - - lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = false - } - - timeouts { - create = "30m" - update = "30m" - delete = "30m" - } -} - -resource "google_container_node_pool" "pools_lifecycle_variant" { - provider = google - count = var.node_pools_create_before_destroy ? length(var.node_pools) : 0 - name = var.node_pools_create_before_destroy ? random_id.name.*.hex[count.index] : lookup(var.node_pools[count.index], "name") + count = length(var.node_pools) + name = lookup(var.node_pools[count.index], "name") project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -377,8 +221,7 @@ resource "google_container_node_pool" "pools_lifecycle_variant" { } lifecycle { - ignore_changes = [initial_node_count] - create_before_destroy = true + ignore_changes = [initial_node_count] } timeouts { @@ -402,6 +245,5 @@ resource "null_resource" "wait_for_cluster" { depends_on = [ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index fbd840285b..b240a23e65 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -30,7 +30,6 @@ resource "null_resource" "delete_default_kube_dns_configmap" { data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -60,7 +59,6 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -88,7 +86,6 @@ EOF data.google_client_config.default, 
google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } @@ -119,6 +116,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index f732fd3ff3..bfe746401c 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -81,8 +81,8 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, google_container_node_pool.pools_lifecycle_variant.*.name, [""]) - cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, google_container_node_pool.pools_lifecycle_variant.*.version, [""]) + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/private-cluster/masq.tf b/modules/private-cluster/masq.tf index 2f351c8c2e..b6e411fc42 100644 --- a/modules/private-cluster/masq.tf +++ b/modules/private-cluster/masq.tf @@ -44,6 +44,5 @@ EOF data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/outputs.tf b/modules/private-cluster/outputs.tf index 53350b0754..dea7b5c7b5 100644 --- a/modules/private-cluster/outputs.tf +++ b/modules/private-cluster/outputs.tf @@ -54,7 +54,6 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, - 
google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 134b399c96..8008e08975 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -198,12 +198,6 @@ variable "node_pools_oauth_scopes" { } } -variable "node_pools_create_before_destroy" { - type = bool - description = "Create a new node pool, then destroy the old node pool. Default behavior is destroy node pool then recreate it" - default = false -} - variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" diff --git a/outputs.tf b/outputs.tf index 53350b0754..dea7b5c7b5 100644 --- a/outputs.tf +++ b/outputs.tf @@ -54,7 +54,6 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, - google_container_node_pool.pools_lifecycle_variant, ] } diff --git a/variables.tf b/variables.tf index 4a90e1f6ef..460bdeaeff 100644 --- a/variables.tf +++ b/variables.tf @@ -198,12 +198,6 @@ variable "node_pools_oauth_scopes" { } } -variable "node_pools_create_before_destroy" { - type = bool - description = "Create a new node pool, then destroy the old node pool. 
Default behavior is destroy node pool then recreate it" - default = false -} - variable "stub_domains" { type = map(list(string)) description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" From 4890bc2d4bf4eea8ad585baa2739aea83ce9c267 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Mon, 9 Sep 2019 16:32:32 -0700 Subject: [PATCH 13/82] add breadcrumb back to schemaNodeConfig for keepers list --- autogen/cluster.tf | 2 ++ modules/private-cluster-lifecycle-variant/cluster.tf | 2 ++ 2 files changed, 4 insertions(+) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 69815c8ee4..f01b32f99c 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -233,6 +233,8 @@ locals { ] } +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true" resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 diff --git a/modules/private-cluster-lifecycle-variant/cluster.tf b/modules/private-cluster-lifecycle-variant/cluster.tf index 05007e3b13..e16847adce 100644 --- a/modules/private-cluster-lifecycle-variant/cluster.tf +++ b/modules/private-cluster-lifecycle-variant/cluster.tf @@ -142,6 +142,8 @@ locals { ] } +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true" resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 From 350157a48c059b016d5ed60d734efce5c7cf5583 Mon Sep 17 00:00:00 2001 From: Richard Song Date: Tue, 10 Sep 2019 00:54:57 -0400 Subject: [PATCH 14/82] added feature check for beta feature istio, and added more accurate naming --- autogen/main.tf | 4 ++-- modules/beta-private-cluster/main.tf | 4 ++-- modules/beta-public-cluster/main.tf | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/autogen/main.tf b/autogen/main.tf index 0f5b0edc06..ce787f1fc0 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -104,7 +104,7 
@@ locals { {% if beta_cluster %} # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false @@ -137,7 +137,7 @@ locals { cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled {% if beta_cluster %} # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! 
local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 1199f2dfa6..fc38644871 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -93,7 +93,7 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false @@ -124,7 +124,7 @@ locals { cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! 
local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index e4e7548a3a..dea58d4de5 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -93,7 +93,7 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false @@ -124,7 +124,7 @@ locals { cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! 
local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled From f880d6610cc2b03e08eb2905eebe8eccbf23cb96 Mon Sep 17 00:00:00 2001 From: Richard Song Date: Tue, 10 Sep 2019 01:11:52 -0400 Subject: [PATCH 15/82] fixed spacing --- autogen/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogen/main.tf b/autogen/main.tf index ce787f1fc0..d9ad888d01 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -104,7 +104,7 @@ locals { {% if beta_cluster %} # BETA features - cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? 
google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false From 59158869a31cdc6024081360332ffdf44d8c30d6 Mon Sep 17 00:00:00 2001 From: Ihor Pukha Date: Tue, 27 Aug 2019 14:33:00 +0300 Subject: [PATCH 16/82] enabling metadata-concealment by default --- CHANGELOG.md | 3 ++- autogen/variables.tf | 3 ++- modules/beta-private-cluster/README.md | 2 +- modules/beta-private-cluster/variables.tf | 3 ++- modules/beta-public-cluster/README.md | 2 +- modules/beta-public-cluster/variables.tf | 3 ++- 6 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 706682fbf1..c070bc124c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] ### Added - +* Enabled metadata-concealment by default [#248] * Added `grant_registry_access` variable to grant Container Registry access to created SA [#236] * Support for Intranode Visbiility (IV) and Veritical Pod Autoscaling (VPA) beta features [#216] * Support for Workload Identity beta feature [#234] @@ -170,6 +170,7 @@ Extending the adopted spec, each change should have a link to its corresponding [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 [#236]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/236 [#217]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/217 [#234]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/234 diff --git a/autogen/variables.tf b/autogen/variables.tf index 9a956194e0..f88ce66782 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -368,7 +368,8 @@ variable 
"pod_security_policy_config" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "enable_intranode_visibility" { diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index eba9f48d31..7c750d3bbe 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -177,7 +177,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 6aa50eafff..82eb9906d0 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -365,7 +365,8 @@ variable "pod_security_policy_config" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "enable_intranode_visibility" { diff --git a/modules/beta-public-cluster/README.md 
b/modules/beta-public-cluster/README.md index 49ffddedc1..7c257c619f 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -168,7 +168,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index d8b68de69b..acfecfb515 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -341,7 +341,8 @@ variable "pod_security_policy_config" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "enable_intranode_visibility" { From d537448cadce58e38436f7c9f013eb8a2a2d3cdb Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Tue, 10 Sep 2019 16:31:12 -0700 Subject: [PATCH 17/82] s/lifecycle-variant/update-variant/ --- autogen/cluster.tf | 9 +++++---- helpers/generate_modules/generate_modules.py | 6 +++--- .../README.md | 2 +- .../auth.tf | 0 .../cluster.tf | 3 ++- .../dns.tf | 0 .../main.tf | 0 .../masq.tf | 0 
.../networks.tf | 0 .../outputs.tf | 0 .../sa.tf | 0 .../scripts/delete-default-resource.sh | 0 .../scripts/kubectl_wrapper.sh | 0 .../scripts/wait-for-cluster.sh | 0 .../variables.tf | 0 .../versions.tf | 0 16 files changed, 11 insertions(+), 9 deletions(-) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/README.md (99%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/auth.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/cluster.tf (97%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/dns.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/main.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/masq.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/networks.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/outputs.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/sa.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/scripts/delete-default-resource.sh (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/scripts/kubectl_wrapper.sh (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/scripts/wait-for-cluster.sh (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/variables.tf (100%) rename modules/{private-cluster-lifecycle-variant => private-cluster-update-variant}/versions.tf (100%) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index f01b32f99c..24529bdc66 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -219,7 +219,7 @@ resource "google_container_cluster" "primary" { /****************************************** Create 
Container Cluster node pools *****************************************/ -{% if lifecycle_variant %} +{% if update_variant %} locals { force_node_pool_recreation_resources = [ "disk_size_gb", @@ -234,7 +234,8 @@ locals { } # This keepers list is based on the terraform google provider schemaNodeConfig -# resources where "ForceNew" is "true" +# resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 @@ -283,7 +284,7 @@ resource "google_container_node_pool" "pools" { provider = google {% endif %} count = length(var.node_pools) - {% if lifecycle_variant %} + {% if update_variant %} name = random_id.name.*.hex[count.index] {% else %} name = lookup(var.node_pools[count.index], "name") @@ -403,7 +404,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] - {% if lifecycle_variant %} + {% if update_variant %} create_before_destroy = true {% endif %} } diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index 1ae841cde1..79fad3bbf9 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -54,10 +54,10 @@ def template_options(self, base): 'private_cluster': True, 'beta_cluster': True, }), - Module("./modules/private-cluster-lifecycle-variant", { - 'module_path': '//modules/private-cluster-lifecycle-variant', + Module("./modules/private-cluster-update-variant", { + 'module_path': '//modules/private-cluster-update-variant', 'private_cluster': True, - 'lifecycle_variant': True, + 'update_variant': True, }), Module("./modules/beta-public-cluster", { 'module_path': '//modules/beta-public-cluster', diff --git a/modules/private-cluster-lifecycle-variant/README.md b/modules/private-cluster-update-variant/README.md 
similarity index 99% rename from modules/private-cluster-lifecycle-variant/README.md rename to modules/private-cluster-update-variant/README.md index 779b021aac..7becc84ea4 100644 --- a/modules/private-cluster-lifecycle-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -25,7 +25,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-lifecycle-variant" + source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-update-variant" project_id = "" name = "gke-test-1" region = "us-central1" diff --git a/modules/private-cluster-lifecycle-variant/auth.tf b/modules/private-cluster-update-variant/auth.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/auth.tf rename to modules/private-cluster-update-variant/auth.tf diff --git a/modules/private-cluster-lifecycle-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf similarity index 97% rename from modules/private-cluster-lifecycle-variant/cluster.tf rename to modules/private-cluster-update-variant/cluster.tf index e16847adce..25b85c67c4 100644 --- a/modules/private-cluster-lifecycle-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -143,7 +143,8 @@ locals { } # This keepers list is based on the terraform google provider schemaNodeConfig -# resources where "ForceNew" is "true" +# resources where "ForceNew" is "true". 
schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 resource "random_id" "name" { count = length(var.node_pools) byte_length = 2 diff --git a/modules/private-cluster-lifecycle-variant/dns.tf b/modules/private-cluster-update-variant/dns.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/dns.tf rename to modules/private-cluster-update-variant/dns.tf diff --git a/modules/private-cluster-lifecycle-variant/main.tf b/modules/private-cluster-update-variant/main.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/main.tf rename to modules/private-cluster-update-variant/main.tf diff --git a/modules/private-cluster-lifecycle-variant/masq.tf b/modules/private-cluster-update-variant/masq.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/masq.tf rename to modules/private-cluster-update-variant/masq.tf diff --git a/modules/private-cluster-lifecycle-variant/networks.tf b/modules/private-cluster-update-variant/networks.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/networks.tf rename to modules/private-cluster-update-variant/networks.tf diff --git a/modules/private-cluster-lifecycle-variant/outputs.tf b/modules/private-cluster-update-variant/outputs.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/outputs.tf rename to modules/private-cluster-update-variant/outputs.tf diff --git a/modules/private-cluster-lifecycle-variant/sa.tf b/modules/private-cluster-update-variant/sa.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/sa.tf rename to modules/private-cluster-update-variant/sa.tf diff --git a/modules/private-cluster-lifecycle-variant/scripts/delete-default-resource.sh b/modules/private-cluster-update-variant/scripts/delete-default-resource.sh similarity index 100% rename from 
modules/private-cluster-lifecycle-variant/scripts/delete-default-resource.sh rename to modules/private-cluster-update-variant/scripts/delete-default-resource.sh diff --git a/modules/private-cluster-lifecycle-variant/scripts/kubectl_wrapper.sh b/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh similarity index 100% rename from modules/private-cluster-lifecycle-variant/scripts/kubectl_wrapper.sh rename to modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh diff --git a/modules/private-cluster-lifecycle-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh similarity index 100% rename from modules/private-cluster-lifecycle-variant/scripts/wait-for-cluster.sh rename to modules/private-cluster-update-variant/scripts/wait-for-cluster.sh diff --git a/modules/private-cluster-lifecycle-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/variables.tf rename to modules/private-cluster-update-variant/variables.tf diff --git a/modules/private-cluster-lifecycle-variant/versions.tf b/modules/private-cluster-update-variant/versions.tf similarity index 100% rename from modules/private-cluster-lifecycle-variant/versions.tf rename to modules/private-cluster-update-variant/versions.tf From f5ce4e49f9647f938bfc678fa894351b6bc3b48d Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Tue, 10 Sep 2019 16:42:35 -0700 Subject: [PATCH 18/82] lookup names in a hash table similar to master --- autogen/cluster.tf | 2 +- cluster.tf | 2 +- modules/beta-private-cluster/cluster.tf | 2 +- modules/beta-public-cluster/cluster.tf | 2 +- modules/private-cluster/cluster.tf | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 24529bdc66..054c4203c3 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -287,7 +287,7 @@ resource "google_container_node_pool" "pools" 
{ {% if update_variant %} name = random_id.name.*.hex[count.index] {% else %} - name = lookup(var.node_pools[count.index], "name") + name = var.node_pools[count.index]["name"] {% endif %} project = var.project_id location = local.location diff --git a/cluster.tf b/cluster.tf index c82253c7cd..ffdb27b0fc 100644 --- a/cluster.tf +++ b/cluster.tf @@ -127,7 +127,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "name") + name = var.node_pools[count.index]["name"] project = var.project_id location = local.location cluster = google_container_cluster.primary.name diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 6469f2f1f2..c481c69a35 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -208,7 +208,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google-beta count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "name") + name = var.node_pools[count.index]["name"] project = var.project_id location = local.location cluster = google_container_cluster.primary.name diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 5d7bf9d158..a264e932b9 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -203,7 +203,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google-beta count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "name") + name = var.node_pools[count.index]["name"] project = var.project_id location = local.location cluster = google_container_cluster.primary.name diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 0c25b40c22..412e8295ed 
100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -132,7 +132,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google count = length(var.node_pools) - name = lookup(var.node_pools[count.index], "name") + name = var.node_pools[count.index]["name"] project = var.project_id location = local.location cluster = google_container_cluster.primary.name From 1854e0494557d751c4b4f42af74dae5d73fa77cc Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Wed, 11 Sep 2019 11:37:10 -0700 Subject: [PATCH 19/82] sort keeper values so that re-ordering has not effect --- autogen/cluster.tf | 40 +- helpers/generate_modules/generate_modules.py | 6 + .../README.md | 421 ++++++++++++++++++ .../auth.tf | 34 ++ .../cluster.tf | 418 +++++++++++++++++ .../dns.tf | 120 +++++ .../main.tf | 153 +++++++ .../masq.tf | 48 ++ .../networks.tf | 32 ++ .../outputs.tf | 151 +++++++ .../beta-private-cluster-update-variant/sa.tf | 71 +++ .../scripts/delete-default-resource.sh | 41 ++ .../scripts/kubectl_wrapper.sh | 53 +++ .../scripts/wait-for-cluster.sh | 33 ++ .../variables.tf | 406 +++++++++++++++++ .../versions.tf | 19 + .../private-cluster-update-variant/cluster.tf | 40 +- 17 files changed, 2062 insertions(+), 24 deletions(-) create mode 100644 modules/beta-private-cluster-update-variant/README.md create mode 100644 modules/beta-private-cluster-update-variant/auth.tf create mode 100644 modules/beta-private-cluster-update-variant/cluster.tf create mode 100644 modules/beta-private-cluster-update-variant/dns.tf create mode 100644 modules/beta-private-cluster-update-variant/main.tf create mode 100644 modules/beta-private-cluster-update-variant/masq.tf create mode 100644 modules/beta-private-cluster-update-variant/networks.tf create mode 100644 modules/beta-private-cluster-update-variant/outputs.tf create mode 100644 modules/beta-private-cluster-update-variant/sa.tf create mode 100755 
modules/beta-private-cluster-update-variant/scripts/delete-default-resource.sh create mode 100755 modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh create mode 100755 modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh create mode 100644 modules/beta-private-cluster-update-variant/variables.tf create mode 100644 modules/beta-private-cluster-update-variant/versions.tf diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 054c4203c3..296b2818df 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -247,30 +247,46 @@ resource "random_id" "name" { ), { labels = join(",", - keys(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels["all"]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) ) }, { metadata = join(",", - keys(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata["all"]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) ) }, { oauth_scopes = join(",", - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) ) }, { tags = join(",", - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] + sort( + concat( + 
var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) ) } ) diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index 79fad3bbf9..b98b8bb69e 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -59,6 +59,12 @@ def template_options(self, base): 'private_cluster': True, 'update_variant': True, }), + Module("./modules/beta-private-cluster-update-variant", { + 'module_path': '//modules/beta-private-cluster-update-variant', + 'private_cluster': True, + 'update_variant': True, + 'beta_cluster': True, + }), Module("./modules/beta-public-cluster", { 'module_path': '//modules/beta-public-cluster', 'private_cluster': False, diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md new file mode 100644 index 0000000000..96d645c115 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/README.md @@ -0,0 +1,421 @@ +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. This particular submodule creates a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters)Beta features are enabled in this submodule. +The resources/services/activations/deletions that this module will create/trigger are: +- Create a GKE cluster with the provided addons +- Create GKE Node Pool(s) with provided configuration and attach to cluster +- Replace the default kube-dns configmap if `stub_domains` are provided +- Activate network policy if `network_policy` is true +- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true + +Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. 
Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. + +**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. + + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + +## Usage +There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: + +```hcl +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster-update-variant" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.0.0.0/28" + istio = true + cloudrun = true + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + preemptible = false + initial_node_count = 80 + }, + ] + + node_pools_oauth_scopes = { + all = [] + + default-node-pool = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = 
"my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure + +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. 
Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliased IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. 
| bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! 
| bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) 
| bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging 
service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether vertical pod autoscaling is enabled | +| zones | List of zones in which the cluster resides | + + + +## Requirements + +Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: + +1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. +2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). +3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. +4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. + +The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. 
+ +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. + +## Templating + +To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. + +The root module is generated by running `make generate`. 
Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. + +Note: The correct sequence to update the repo using autogen functionality is to run +`make generate && make generate_docs`. This will create the various Terraform files, and then +generate the Terraform documentation using `terraform-docs`. + +## Testing + +### Requirements +- [bundler](https://github.com/bundler/bundler) +- [gcloud](https://cloud.google.com/sdk/install) +- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 + +### Autogeneration of documentation from .tf files +Run +``` +make generate_docs +``` + +### Integration test + +Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). + +Six test-kitchen instances are defined: + +- `deploy-service` +- `node-pool` +- `shared-vpc` +- `simple-regional` +- `simple-zonal` +- `stub-domains` + +The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. + +#### Setup + +1. Configure the [test fixtures](#test-configuration) +2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. + - Requires the [permissions to run the module](#configure-a-service-account) + - Requires `roles/compute.networkAdmin` to create the test suite's networks + - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested +3. Build the Docker container for testing: + + ``` + make docker_build_kitchen_terraform + ``` +4. Run the testing container in interactive mode: + + ``` + make docker_run + ``` + + The module root directory will be loaded into the Docker container at `/cft/workdir/`. +5. Run kitchen-terraform to test the infrastructure: + + 1. 
`kitchen create` creates Terraform state and downloads modules, if applicable. + 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. + 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). + 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. + 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. + +Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. + +If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. + +When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: + +``` +export COMPUTE_ENGINE_SERVICE_ACCOUNT="" +export PROJECT_ID="" +export REGION="" +export ZONES='[""]' +export SERVICE_ACCOUNT_JSON="$(cat "")" +export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" +export GOOGLE_APPLICATION_CREDENTIALS="" +``` + +#### Test configuration + +Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. +For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. 
+Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. + +Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. + +### Autogeneration of documentation from .tf files +Run +``` +make generate_docs +``` + +### Linting +The makefile in this project will lint or sometimes just format any shell, +Python, golang, Terraform, or Dockerfiles. The linters will only be run if +the makefile finds files with the appropriate file extension. + +All of the linter checks are in the default make target, so you just have to +run + +``` +make -s +``` + +The -s is for 'silent'. Successful output looks like this + +``` +Running shellcheck +Running flake8 +Running go fmt and go vet +Running terraform validate +Running hadolint on Dockerfiles +Checking for required files +Testing the validity of the header check +.. +---------------------------------------------------------------------- +Ran 2 tests in 0.026s + +OK +Checking file headers +The following lines have trailing whitespace +``` + +The linters +are as follows: +* Shell - shellcheck. Can be found in homebrew +* Python - flake8. Can be installed with 'pip install flake8' +* Golang - gofmt. gofmt comes with the standard golang installation. golang +is a compiled language so there is no standard linter. +* Terraform - terraform has a built-in linter in the 'terraform validate' +command. +* Dockerfiles - hadolint. 
Can be found in homebrew + +[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md +[terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-private-cluster-update-variant/auth.tf b/modules/beta-private-cluster-update-variant/auth.tf new file mode 100644 index 0000000000..c177eee5a7 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/auth.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" { + provider = google-beta +} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) +} diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf new file mode 100644 index 0000000000..cf1def945d --- /dev/null +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -0,0 +1,418 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create Container Cluster + *****************************************/ +resource "google_container_cluster" "primary" { + provider = google-beta + + name = var.name + description = var.description + project = var.project_id + resource_labels = var.cluster_resource_labels + + location = local.location + node_locations = local.node_locations + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.master_version + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + enable_intranode_visibility = var.enable_intranode_visibility + default_max_pods_per_node = var.default_max_pods_per_node + + vertical_pod_autoscaling { + enabled = var.enable_vertical_pod_autoscaling + } + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "resource_usage_export_config" { + for_each = var.resource_usage_export_dataset_id != "" ? 
[var.resource_usage_export_dataset_id] : [] + content { + enable_network_egress_metering = true + bigquery_destination { + dataset_id = resource_usage_export_config.value + } + } + } + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } + + master_auth { + username = var.basic_auth_username + password = var.basic_auth_password + + client_certificate_config { + issue_client_certificate = var.issue_client_certificate + } + } + + addons_config { + http_load_balancing { + disabled = ! var.http_load_balancing + } + + horizontal_pod_autoscaling { + disabled = ! var.horizontal_pod_autoscaling + } + + kubernetes_dashboard { + disabled = ! var.kubernetes_dashboard + } + + network_policy_config { + disabled = ! var.network_policy + } + + istio_config { + disabled = ! 
var.istio + } + + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } + } + + ip_allocation_policy { + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services + } + + maintenance_policy { + daily_maintenance_window { + start_time = var.maintenance_start_time + } + } + + lifecycle { + ignore_changes = [node_pool, initial_node_count] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + initial_node_count = var.initial_node_count + + node_config { + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } + } + } + + private_cluster_config { + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block + } + + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } + + dynamic "workload_identity_config" { + for_each = local.cluster_workload_identity_config + + content { + identity_namespace = workload_identity_config.value.identity_namespace + } + } + + dynamic "authenticator_groups_config" { + for_each = local.cluster_authenticator_security_group + content { + security_group = authenticator_groups_config.value.security_group + } + } +} + +/****************************************** + Create Container Cluster node pools + *****************************************/ 
+locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +resource "google_container_node_pool" "pools" { + provider = google-beta + count = length(var.node_pools) + name = random_id.name.*.hex[count.index] + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = 
lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "null_resource" "wait_for_cluster" { + + provisioner "local-exec" { + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + provisioner "local-exec" { + when = destroy + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/modules/beta-private-cluster-update-variant/dns.tf b/modules/beta-private-cluster-update-variant/dns.tf new file mode 100644 index 0000000000..b240a23e65 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/dns.tf @@ -0,0 +1,120 @@ +/** + * 
Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Delete default kube-dns configmap + *****************************************/ +resource "null_resource" "delete_default_kube_dns_configmap" { + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + + provisioner "local-exec" { + command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + } + + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} + +/****************************************** + Create kube-dns confimap + *****************************************/ +resource "kubernetes_config_map" "kube-dns" { + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 + + metadata { + name = "kube-dns" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) + cluster_type = var.regional ? 
"regional" : "zonal" + // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous. + default_auto_upgrade = var.regional ? true : false + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + cluster_cloudrun_config = var.cloudrun ? [{ disabled = false }] : [] + + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] + + cluster_authenticator_security_group = var.authenticator_security_group == null ? [] : [{ + security_group = var.authenticator_security_group + }] + + cluster_sandbox_enabled = var.sandbox_enabled ? ["gvisor"] : [] + + + cluster_output_name = google_container_cluster.primary.name + cluster_output_location = google_container_cluster.primary.location + cluster_output_region = google_container_cluster.primary.region + cluster_output_regional_zones = google_container_cluster.primary.node_locations + cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] + cluster_output_zones = local.cluster_output_regional_zones + + cluster_output_endpoint = var.deploy_using_private_endpoint ? 
google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.endpoint + + cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) + cluster_output_master_version = google_container_cluster.primary.master_version + cluster_output_min_master_version = google_container_cluster.primary.min_master_version + cluster_output_logging_service = google_container_cluster.primary.logging_service + cluster_output_monitoring_service = google_container_cluster.primary.monitoring_service + cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled + cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled + cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled + cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled + + # BETA features + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false + cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? 
google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false + + # /BETA features + + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + + cluster_master_auth_list_layer1 = local.cluster_output_master_auth + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] + # cluster locals + cluster_name = local.cluster_output_name + cluster_location = local.cluster_output_location + cluster_region = local.cluster_output_region + cluster_zones = sort(local.cluster_output_zones) + cluster_endpoint = local.cluster_output_endpoint + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_output_master_version + cluster_min_master_version = local.cluster_output_min_master_version + cluster_logging_service = local.cluster_output_logging_service + cluster_monitoring_service = local.cluster_output_monitoring_service + cluster_node_pools_names = local.cluster_output_node_pools_names + cluster_node_pools_versions = local.cluster_output_node_pools_versions + cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled + cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled + cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled + # BETA features + cluster_istio_enabled = ! 
local.cluster_output_istio_disabled + cluster_cloudrun_enabled = var.cloudrun + cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled + cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled + cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ + identity_namespace = var.identity_namespace + }] + # /BETA features +} + +/****************************************** + Get available container engine versions + *****************************************/ +data "google_container_engine_versions" "region" { + location = local.location + project = var.project_id +} + +data "google_container_engine_versions" "zone" { + // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error + // + // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. + // + location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] + project = var.project_id +} diff --git a/modules/beta-private-cluster-update-variant/masq.tf b/modules/beta-private-cluster-update-variant/masq.tf new file mode 100644 index 0000000000..b6e411fc42 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/masq.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create ip-masq-agent confimap + *****************************************/ +resource "kubernetes_config_map" "ip-masq-agent" { + count = var.configure_ip_masq ? 1 : 0 + + metadata { + name = "ip-masq-agent" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + config = <&2 echo "3 arguments expected. Exiting." + exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh b/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh new file mode 100755 index 0000000000..e92300bcb5 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 3 ]; then + >&2 echo "Not all expected arguments set." + exit 1 +fi + +HOST=$1 +TOKEN=$2 +CA_CERTIFICATE=$3 + +shift 3 + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +# shellcheck disable=SC1117 +base64 --help | grep "\--decode" && B64_ARG="--decode" || B64_ARG="-d" +echo "${CA_CERTIFICATE}" | base64 ${B64_ARG} > "${TMPDIR}/ca_certificate" + +kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null +rm -f "${TMPDIR}/ca_certificate" +kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null +kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null +kubectl config use-context kubectl-wrapper 1>/dev/null +kubectl version 1>/dev/null + +"$@" diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh new file mode 100755 index 0000000000..6ff3253d58 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +PROJECT=$1 +CLUSTER_NAME=$2 +gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" +jq_query=".[] | select(.name==\"$CLUSTER_NAME\") | .status" + +echo "Waiting for cluster $2 in project $1 to reconcile..." + +current_status=$($gcloud_command | jq -r "$jq_query") + +while [[ "${current_status}" == "RECONCILING" ]]; do + printf "." + sleep 5 + current_status=$($gcloud_command | jq -r "$jq_query") +done + +echo "Cluster is ready!" diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf new file mode 100644 index 0000000000..975fe7a173 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -0,0 +1,406 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +variable "project_id" { + type = string + description = "The project ID to host the cluster in (required)" +} + +variable "name" { + type = string + description = "The name of the cluster (required)" +} + +variable "description" { + type = string + description = "The description of the cluster" + default = "" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!)" + default = true +} + +variable "region" { + type = string + description = "The region to host the cluster in (required)" +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" + default = [] +} + +variable "network" { + type = string + description = "The VPC network to host the cluster in (required)" +} + +variable "network_project_id" { + type = string + description = "The project ID of the shared VPC's host (for shared vpc support)" + default = "" +} + +variable "subnetwork" { + type = string + description = "The subnetwork to host the cluster in (required)" +} + +variable "kubernetes_version" { + type = string + description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." + default = "latest" +} + +variable "node_version" { + type = string + description = "The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation." 
+ default = "" +} + +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + +variable "horizontal_pod_autoscaling" { + type = bool + description = "Enable horizontal pod autoscaling addon" + default = true +} + +variable "http_load_balancing" { + type = bool + description = "Enable httpload balancer addon" + default = true +} + +variable "kubernetes_dashboard" { + type = bool + description = "Enable kubernetes dashboard addon" + default = false +} + +variable "network_policy" { + type = bool + description = "Enable network policy addon" + default = false +} + +variable "network_policy_provider" { + type = string + description = "The network policy provider." + default = "CALICO" +} + +variable "maintenance_start_time" { + type = string + description = "Time window specified for daily maintenance operations in RFC3339 format" + default = "05:00" +} + +variable "ip_range_pods" { + type = string + description = "The _name_ of the secondary subnet ip range to use for pods" +} + +variable "ip_range_services" { + type = string + description = "The _name_ of the secondary subnet range to use for services" +} + +variable "initial_node_count" { + type = number + description = "The number of nodes to create in this cluster's default node pool." + default = 0 +} + +variable "remove_default_node_pool" { + type = bool + description = "Remove default node pool while setting up the cluster" + default = false +} + +variable "disable_legacy_metadata_endpoints" { + type = bool + description = "Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. 
Changing this value will cause all node pools to be recreated." + default = true +} + +variable "node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + + default = [ + { + name = "default-node-pool" + }, + ] +} + +variable "node_pools_labels" { + type = map(map(string)) + description = "Map of maps containing node labels by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_metadata" { + type = map(map(string)) + description = "Map of maps containing node metadata by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_taints" { + type = map(list(object({ key = string, value = string, effect = string }))) + description = "Map of lists containing node taints by node-pool name" + + default = { + all = [] + default-node-pool = [] + } +} + +variable "node_pools_tags" { + type = map(list(string)) + description = "Map of lists containing node network tags by node-pool name" + + default = { + all = [] + default-node-pool = [] + } +} + +variable "node_pools_oauth_scopes" { + type = map(list(string)) + description = "Map of lists containing node oauth scopes by node-pool name" + + default = { + all = ["https://www.googleapis.com/auth/cloud-platform"] + default-node-pool = [] + } +} + +variable "stub_domains" { + type = map(list(string)) + description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" + default = {} +} + +variable "upstream_nameservers" { + type = "list" + description = "If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf" + default = [] +} + +variable "non_masquerade_cidrs" { + type = list(string) + description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading." 
+ default = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] +} + +variable "ip_masq_resync_interval" { + type = string + description = "The interval at which the agent attempts to sync its ConfigMap file from the disk." + default = "60s" +} + +variable "ip_masq_link_local" { + type = bool + description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)." + default = false +} + +variable "configure_ip_masq" { + description = "Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server." + default = false +} + +variable "logging_service" { + type = string + description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none" + default = "logging.googleapis.com" +} + +variable "monitoring_service" { + type = string + description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none" + default = "monitoring.googleapis.com" +} + +variable "create_service_account" { + type = bool + description = "Defines if service account specified to run nodes should be created." + default = true +} + +variable "grant_registry_access" { + type = bool + description = "Grants created cluster-specific service account storage.objectViewer role." + default = false +} + +variable "service_account" { + type = string + description = "The service account to run nodes as if not overridden in `node_pools`. 
The create_service_account variable default value (true) will cause a cluster-specific service account to be created." + default = "" +} + +variable "basic_auth_username" { + type = string + description = "The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration." + default = "" +} + +variable "basic_auth_password" { + type = string + description = "The password to be used with Basic Authentication." + default = "" +} + +variable "issue_client_certificate" { + type = bool + description = "Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive!" + default = false +} + +variable "cluster_ipv4_cidr" { + default = "" + description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." +} + +variable "cluster_resource_labels" { + type = map(string) + description = "The GCE resource labels (a map of key/value pairs) to be applied to the cluster" + default = {} +} + + +variable "deploy_using_private_endpoint" { + type = bool + description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." 
+ default = false +} + +variable "enable_private_endpoint" { + type = bool + description = "(Beta) Whether the master's internal IP address is used as the cluster endpoint" + default = false +} + +variable "enable_private_nodes" { + type = bool + description = "(Beta) Whether nodes have internal IP addresses only" + default = false +} + +variable "master_ipv4_cidr_block" { + type = string + description = "(Beta) The IP range in CIDR notation to use for the hosted master network" + default = "10.0.0.0/28" +} + +variable "istio" { + description = "(Beta) Enable Istio addon" + default = false +} + +variable "default_max_pods_per_node" { + description = "The maximum number of pods to schedule per node" + default = 110 +} + +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." + type = list(object({ state = string, key_name = string })) + default = [{ + state = "DECRYPTED" + key_name = "" + }] +} + +variable "cloudrun" { + description = "(Beta) Enable CloudRun addon" + default = false +} + +variable "enable_binary_authorization" { + description = "Enable BinAuthZ Admission controller" + default = false +} + +variable "pod_security_policy_config" { + description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ + "enabled" = false + }] +} + +variable "resource_usage_export_dataset_id" { + type = string + description = "The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic." 
+ default = "" +} + +variable "node_metadata" { + description = "Specifies how node metadata is exposed to the workload running on the node" + default = "UNSPECIFIED" +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false +} + +variable "enable_intranode_visibility" { + type = bool + description = "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network" + default = false +} + +variable "enable_vertical_pod_autoscaling" { + type = bool + description = "Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it" + default = false +} + +variable "identity_namespace" { + description = "Workload Identity namespace" + type = string + default = "" +} + +variable "authenticator_security_group" { + type = string + description = "The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com" + default = null +} + diff --git a/modules/beta-private-cluster-update-variant/versions.tf b/modules/beta-private-cluster-update-variant/versions.tf new file mode 100644 index 0000000000..832ec1df39 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index 25b85c67c4..e8db91a77a 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -156,30 +156,46 @@ resource "random_id" "name" { ), { labels = join(",", - keys(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels["all"]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) ) }, { metadata = join(",", - keys(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata["all"]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) ) }, { oauth_scopes = join(",", - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) ) }, { tags = join(",", - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) ) } ) From 
99a179de65475fc12b401ba4cf085d1452123121 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Wed, 11 Sep 2019 11:38:36 -0700 Subject: [PATCH 20/82] Add two examples for the update_variant and test of a private zonal cluster --- examples/node_pool_update_variant/README.md | 45 ++++++ .../data/shutdown-script.sh | 17 +++ examples/node_pool_update_variant/main.tf | 119 +++++++++++++++ examples/node_pool_update_variant/outputs.tf | 35 +++++ .../node_pool_update_variant/test_outputs.tf | 63 ++++++++ .../node_pool_update_variant/variables.tf | 54 +++++++ .../node_pool_update_variant_beta/README.md | 46 ++++++ .../data/shutdown-script.sh | 17 +++ .../node_pool_update_variant_beta/main.tf | 138 ++++++++++++++++++ .../node_pool_update_variant_beta/outputs.tf | 35 +++++ .../test_outputs.tf | 63 ++++++++ .../variables.tf | 57 ++++++++ .../node_pool_update_variant/example.tf | 29 ++++ .../node_pool_update_variant/network.tf | 48 ++++++ .../node_pool_update_variant/outputs.tf | 1 + .../node_pool_update_variant/terraform.tfvars | 1 + .../node_pool_update_variant/variables.tf | 1 + 17 files changed, 769 insertions(+) create mode 100644 examples/node_pool_update_variant/README.md create mode 100644 examples/node_pool_update_variant/data/shutdown-script.sh create mode 100644 examples/node_pool_update_variant/main.tf create mode 100644 examples/node_pool_update_variant/outputs.tf create mode 100644 examples/node_pool_update_variant/test_outputs.tf create mode 100644 examples/node_pool_update_variant/variables.tf create mode 100644 examples/node_pool_update_variant_beta/README.md create mode 100644 examples/node_pool_update_variant_beta/data/shutdown-script.sh create mode 100644 examples/node_pool_update_variant_beta/main.tf create mode 100644 examples/node_pool_update_variant_beta/outputs.tf create mode 100644 examples/node_pool_update_variant_beta/test_outputs.tf create mode 100644 examples/node_pool_update_variant_beta/variables.tf create mode 100644 
test/fixtures/node_pool_update_variant/example.tf create mode 100644 test/fixtures/node_pool_update_variant/network.tf create mode 120000 test/fixtures/node_pool_update_variant/outputs.tf create mode 120000 test/fixtures/node_pool_update_variant/terraform.tfvars create mode 120000 test/fixtures/node_pool_update_variant/variables.tf diff --git a/examples/node_pool_update_variant/README.md b/examples/node_pool_update_variant/README.md new file mode 100644 index 0000000000..9215f091cb --- /dev/null +++ b/examples/node_pool_update_variant/README.md @@ -0,0 +1,45 @@ +# Node Pool Cluster + +This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, taints, and network tags. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | +| network | The VPC network to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master 
Kubernetes version | +| network | | +| project\_id | | +| region | | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/node_pool_update_variant/data/shutdown-script.sh b/examples/node_pool_update_variant/data/shutdown-script.sh new file mode 100644 index 0000000000..f1ff19c353 --- /dev/null +++ b/examples/node_pool_update_variant/data/shutdown-script.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME" diff --git a/examples/node_pool_update_variant/main.tf b/examples/node_pool_update_variant/main.tf new file mode 100644 index 0000000000..c10e797511 --- /dev/null +++ b/examples/node_pool_update_variant/main.tf @@ -0,0 +1,119 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + cluster_type = "node-pool-update-variant" +} + +provider "google" { + version = "~> 2.12.0" + region = var.region +} + +data "google_compute_subnetwork" "subnetwork" { + name = var.subnetwork + project = var.project_id + region = var.region +} + +module "gke" { + source = "../../modules/private-cluster-update-variant" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = false + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] + + node_pools = [ + { + name = "pool-01" + min_count = 1 + max_count = 2 + service_account = var.compute_engine_service_account + auto_upgrade = true + }, + { + name = "pool-02" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 2 + disk_size_gb = 30 + disk_type = "pd-standard" + accelerator_count = 1 + accelerator_type = "nvidia-tesla-p4" + image_type = "COS" + auto_repair = false + service_account = var.compute_engine_service_account + }, + ] + + node_pools_oauth_scopes = { + all = [] + pool-01 = [] + pool-02 = [] + } + + node_pools_metadata = { + 
all = {} + pool-01 = { + shutdown-script = file("${path.module}/data/shutdown-script.sh") + } + pool-02 = {} + } + + node_pools_labels = { + all = { + all-pools-example = true + } + pool-01 = { + pool-01-example = true + } + pool-02 = {} + } + + node_pools_tags = { + all = [ + "all-node-example", + ] + pool-01 = [ + "pool-01-example", + ] + pool-02 = [] + } +} + +data "google_client_config" "default" { +} diff --git a/examples/node_pool_update_variant/outputs.tf b/examples/node_pool_update_variant/outputs.tf new file mode 100644 index 0000000000..0d972dcd88 --- /dev/null +++ b/examples/node_pool_update_variant/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." 
+ value = module.gke.service_account +} + diff --git a/examples/node_pool_update_variant/test_outputs.tf b/examples/node_pool_update_variant/test_outputs.tf new file mode 100644 index 0000000000..e64c40e477 --- /dev/null +++ b/examples/node_pool_update_variant/test_outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = var.ip_range_services +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/node_pool_update_variant/variables.tf b/examples/node_pool_update_variant/variables.tf new file mode 100644 
index 0000000000..040c78d2c4 --- /dev/null +++ b/examples/node_pool_update_variant/variables.tf @@ -0,0 +1,54 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "zones" { + type = list(string) + description = "The zone to host the cluster in (required if is a zonal cluster)" +} + +variable "network" { + description = "The VPC network to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for pods" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} + diff --git a/examples/node_pool_update_variant_beta/README.md b/examples/node_pool_update_variant_beta/README.md new file mode 100644 index 0000000000..e95af795e9 --- /dev/null +++ b/examples/node_pool_update_variant_beta/README.md @@ -0,0 +1,46 @@ +# Node Pool Cluster + +This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, 
taints, and network tags. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| credentials\_path | The path to the GCP credentials JSON file | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | +| network | The VPC network to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| project\_id | | +| region | | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/node_pool_update_variant_beta/data/shutdown-script.sh b/examples/node_pool_update_variant_beta/data/shutdown-script.sh new file mode 100644 index 0000000000..f1ff19c353 --- /dev/null +++ b/examples/node_pool_update_variant_beta/data/shutdown-script.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME" diff --git a/examples/node_pool_update_variant_beta/main.tf b/examples/node_pool_update_variant_beta/main.tf new file mode 100644 index 0000000000..373fd59f30 --- /dev/null +++ b/examples/node_pool_update_variant_beta/main.tf @@ -0,0 +1,138 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + cluster_type = "node-pool-update-variant-beta" +} + +provider "google-beta" { + version = "~> 2.12.0" + credentials = file(var.credentials_path) + region = var.region +} + +data "google_compute_subnetwork" "subnetwork" { + name = var.subnetwork + project = var.project_id + region = var.region +} + +module "gke" { + source = "../../modules/beta-private-cluster-update-variant" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = false + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] + + node_pools = [ + { + name = "pool-01" + min_count = 1 + max_count = 2 + service_account = var.compute_engine_service_account + auto_upgrade = true + }, + { + name = "pool-02" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 2 + disk_size_gb = 30 + disk_type = "pd-standard" + accelerator_count = 1 + accelerator_type = "nvidia-tesla-p4" + image_type = "COS" + auto_repair = false + service_account = var.compute_engine_service_account + }, + ] + + node_pools_oauth_scopes = { + all = [] + 
pool-01 = [] + pool-02 = [] + } + + node_pools_metadata = { + all = {} + pool-01 = { + shutdown-script = file("${path.module}/data/shutdown-script.sh") + } + pool-02 = {} + } + + node_pools_labels = { + all = { + all-pools-example = true + } + pool-01 = { + pool-01-example = true + } + pool-02 = {} + } + + node_pools_taints = { + all = [ + { + key = "all-pools-example" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + pool-01 = [ + { + key = "pool-01-example" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + pool-02 = [] + } + + node_pools_tags = { + all = [ + "all-node-example", + ] + pool-01 = [ + "pool-01-example", + ] + pool-02 = [] + } +} + +data "google_client_config" "default" { +} diff --git a/examples/node_pool_update_variant_beta/outputs.tf b/examples/node_pool_update_variant_beta/outputs.tf new file mode 100644 index 0000000000..0d972dcd88 --- /dev/null +++ b/examples/node_pool_update_variant_beta/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." 
+ value = module.gke.service_account +} + diff --git a/examples/node_pool_update_variant_beta/test_outputs.tf b/examples/node_pool_update_variant_beta/test_outputs.tf new file mode 100644 index 0000000000..e64c40e477 --- /dev/null +++ b/examples/node_pool_update_variant_beta/test_outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = var.ip_range_services +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/node_pool_update_variant_beta/variables.tf 
b/examples/node_pool_update_variant_beta/variables.tf new file mode 100644 index 0000000000..9dc3873177 --- /dev/null +++ b/examples/node_pool_update_variant_beta/variables.tf @@ -0,0 +1,57 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "credentials_path" { + description = "The path to the GCP credentials JSON file" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "zones" { + type = list(string) + description = "The zone to host the cluster in (required if is a zonal cluster)" +} + +variable "network" { + description = "The VPC network to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for pods" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} diff --git a/test/fixtures/node_pool_update_variant/example.tf b/test/fixtures/node_pool_update_variant/example.tf new file mode 100644 index 0000000000..c3a21df3d5 --- /dev/null +++ 
b/test/fixtures/node_pool_update_variant/example.tf @@ -0,0 +1,29 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "example" { + source = "../../../examples/node_pool_update_variant" + + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + zones = slice(var.zones, 0, 1) + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + compute_engine_service_account = var.compute_engine_service_account +} diff --git a/test/fixtures/node_pool_update_variant/network.tf b/test/fixtures/node_pool_update_variant/network.tf new file mode 100644 index 0000000000..e1292eae3b --- /dev/null +++ b/test/fixtures/node_pool_update_variant/network.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +provider "google" { + project = var.project_id +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} + diff --git a/test/fixtures/node_pool_update_variant/outputs.tf b/test/fixtures/node_pool_update_variant/outputs.tf new file mode 120000 index 0000000000..726bdc722f --- /dev/null +++ b/test/fixtures/node_pool_update_variant/outputs.tf @@ -0,0 +1 @@ +../shared/outputs.tf \ No newline at end of file diff --git a/test/fixtures/node_pool_update_variant/terraform.tfvars b/test/fixtures/node_pool_update_variant/terraform.tfvars new file mode 120000 index 0000000000..08ac6f4724 --- /dev/null +++ b/test/fixtures/node_pool_update_variant/terraform.tfvars @@ -0,0 +1 @@ +../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/node_pool_update_variant/variables.tf b/test/fixtures/node_pool_update_variant/variables.tf new file mode 120000 index 0000000000..c113c00a3d --- /dev/null +++ b/test/fixtures/node_pool_update_variant/variables.tf @@ -0,0 +1 @@ +../shared/variables.tf \ No newline at end of file From 86ad23ca7b97da1a59612a7153765ffc5c80fe01 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Mon, 16 Sep 2019 10:17:47 -0700 Subject: [PATCH 21/82] generate docs after a master merge --- 
modules/beta-private-cluster-update-variant/README.md | 2 +- modules/beta-private-cluster-update-variant/variables.tf | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 96d645c115..020ab728c0 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -177,7 +177,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 975fe7a173..9a869a830f 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -371,7 +371,8 @@ variable "resource_usage_export_dataset_id" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "sandbox_enabled" { From 
3b98ff4d12df79e50c1042abb35f56dc32a38c1d Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Wed, 28 Aug 2019 18:18:19 -0400 Subject: [PATCH 22/82] Updated docs and migration script for v5.0. --- CHANGELOG.md | 15 +- docs/upgrading_to_v5.0.md | 71 +++++++++ helpers/migrate.py | 304 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 388 insertions(+), 2 deletions(-) create mode 100644 docs/upgrading_to_v5.0.md create mode 100755 helpers/migrate.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 017b69bba6..fb8cf39f47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,10 +8,15 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +## [v5.0.0] - 2019-XX-XX +v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). + +The v5.0.0 module requires using the [2.12 version](https://github.com/terraform-providers/terraform-provider-google/blob/master/CHANGELOG.md#2120-august-01-2019) of the Google provider. + ### Changed -* All Beta functionality removed from non-beta clusters, some properties like node_pool taints available only in beta cluster now [#228] * **Breaking**: Enabled metadata-concealment by default [#248] +* All beta functionality removed from non-beta clusters, moved `node_pool_taints` to beta modules [#228] ### Added * Added support for resource usage export config [#238] @@ -22,6 +27,10 @@ Extending the adopted spec, each change should have a link to its corresponding * Support for Google Groups based RBAC beta feature [#217] * Support for disabling node pool autoscaling by setting `autoscaling` to `false` within the node pool variable. [#250] +### Fixed + +* Fixed issue with passing a dynamically created Service Account to the module. [#27] + ## [v4.1.0] 2019-07-24 ### Added @@ -164,7 +173,8 @@ Extending the adopted spec, each change should have a link to its corresponding * Initial release of module. 
-[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...HEAD +[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...HEAD +[v5.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...v5.0.0 [v4.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.0.0...v4.1.0 [v4.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v3.0.0...v4.0.0 [v3.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v2.1.0...v3.0.0 @@ -186,6 +196,7 @@ Extending the adopted spec, each change should have a link to its corresponding [#236]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/236 [#217]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/217 [#234]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/234 +[#27]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/27 [#216]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/216 [#214]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/214 [#210]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/210 diff --git a/docs/upgrading_to_v5.0.md b/docs/upgrading_to_v5.0.md new file mode 100644 index 0000000000..c3ad44ccdf --- /dev/null +++ b/docs/upgrading_to_v5.0.md @@ -0,0 +1,71 @@ +# Upgrading to v5.0 + +The v5.0 release of *kubernetes-engine* is a backwards incompatible +release. + +## Migration Instructions + +### Node pool taints +Previously, node pool taints could be set on all module versions. + +Now, to set taints you must use the beta version of the module. 
+ +```diff + module "kubernetes_engine_private_cluster" { +- source = "terraform-google-modules/kubernetes-engine/google" ++ source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster" +- version = "~> 4.0" ++ version = "~> 5.0" + } +``` + +### Service Account creation + +Previously, if you explicitly specified a Service Account using the `service_account` variable on the module this was sufficient to force that Service Account to be used. + +Now, an additional `create_service_account` has been added with a default value of `true`. If you would like to use an explicitly created Service Account from outside the module, you will need to set `create_service_account` to `false` (in addition to passing in the Service Account email). + +No action is needed if you use the module's default service account. + +```diff + module "kubernetes_engine_private_cluster" { + source = "terraform-google-modules/kubernetes-engine/google" +- version = "~> 4.0" ++ version = "~> 5.0" + + service_account = "project-service-account@my-project.iam.gserviceaccount.com" ++ create_service_account = false + # ... + } +``` + +### Resource simplification +The `google_container_cluster` and `google_container_node_pool` resources previously were different between regional and zonal clusters. They have now been collapsed into a single resource using the `location` variable. + +If you are using regional clusters, no migration is needed. If you are using zonal clusters, a state migration is needed. You can use a [script](../helpers/migrate.py) we provided to determine the required state changes: + +1. Download the script + + ``` + curl -O https://raw.githubusercontent.com/terraform-google-modules/terraform-google-kubernetes-engine/v5.0.0/helpers/migrate.py + chmod +x migrate.py + ``` + +2. 
Execute the migration script + + ``` + ./migrate.py + ``` + + Output will be similar to the following: + ``` + ---- Migrating the following modules: + -- module.gke-cluster-dev.module.gke + ---- Commands to run: + terraform state mv -state terraform.tfstate "module.gke-cluster-dev.module.gke.google_container_cluster.zonal_primary[0]" "module.gke-cluster-dev.module.gke.google_container_cluster.primary[0]" + terraform state mv "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]" + ``` + +3. Execute the provided state migration commands (backups are automatically created). + +4. Run `terraform plan` to confirm no changes are expected. diff --git a/helpers/migrate.py b/helpers/migrate.py new file mode 100755 index 0000000000..0973b61d01 --- /dev/null +++ b/helpers/migrate.py @@ -0,0 +1,304 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import copy +import subprocess +import sys +import shutil +import re + +MIGRATIONS = [ + { + "resource_type": "google_container_cluster", + "name": "zonal_primary", + "rename": "primary", + "module": "" + }, + { + "resource_type": "google_container_node_pool", + "name": "zonal_pools", + "rename": "pools", + "module": "" + }, +] + +class ModuleMigration: + """ + Migrate the resources in a module from their legacy zonal names to the + new unified resource structure. + """ + + def __init__(self, source_module): + self.source_module = source_module + + def moves(self): + """ + Generate the set of old/new resource pairs that will be migrated + to the `destination` module. + """ + resources = self.targets() + moves = [] + for (old, migration) in resources: + new = copy.deepcopy(old) + new.module += migration["module"] + + # Update the copied resource with the "rename" value if it is set + if "rename" in migration: + new.name = migration["rename"] + + pair = (old.path(), new.path()) + moves.append(pair) + return moves + + def targets(self): + """ + A list of resources that will be moved to the new module + """ + to_move = [] + + for migration in MIGRATIONS: + resource_type = migration["resource_type"] + resource_name = migration["name"] + matching_resources = self.source_module.get_resources( + resource_type, + resource_name) + to_move += [(r, migration) for r in matching_resources] + + return to_move + +class TerraformModule: + """ + A Terraform module with associated resources. + """ + + def __init__(self, name, resources): + """ + Create a new module and associate it with a list of resources. + """ + self.name = name + self.resources = resources + + def get_resources(self, resource_type=None, resource_name=None): + """ + Return a list of resources matching the given resource type and name. 
+ """ + + ret = [] + for resource in self.resources: + matches_type = (resource_type is None or + resource_type == resource.resource_type) + + name_pattern = re.compile(r'%s(\[\d+\])?' % resource_name) + matches_name = (resource_name is None or + name_pattern.match(resource.name)) + + if matches_type and matches_name: + ret.append(resource) + + return ret + + def has_resource(self, resource_type=None, resource_name=None): + """ + Does this module contain a resource with the matching type and name? + """ + for resource in self.resources: + matches_type = (resource_type is None or + resource_type == resource.resource_type) + + matches_name = (resource_name is None or + resource_name in resource.name) + + if matches_type and matches_name: + return True + + return False + + def __repr__(self): + return "{}({!r}, {!r})".format( + self.__class__.__name__, + self.name, + [repr(resource) for resource in self.resources]) + + +class TerraformResource: + """ + A Terraform resource, defined by the identifier of that resource. + """ + + @classmethod + def from_path(cls, path): + """ + Generate a new Terraform resource, based on the fully qualified + Terraform resource path. + """ + if re.match(r'\A[\w.\[\]-]+\Z', path) is None: + raise ValueError( + "Invalid Terraform resource path {!r}".format(path)) + + parts = path.split(".") + name = parts.pop() + resource_type = parts.pop() + module = ".".join(parts) + return cls(module, resource_type, name) + + def __init__(self, module, resource_type, name): + """ + Create a new TerraformResource from a pre-parsed path. + """ + self.module = module + self.resource_type = resource_type + + find_suffix = re.match('(^.+)\[(\d+)\]', name) + if find_suffix: + self.name = find_suffix.group(1) + self.index = find_suffix.group(2) + else: + self.name = name + self.index = -1 + + def path(self): + """ + Return the fully qualified resource path. 
+ """ + parts = [self.module, self.resource_type, self.name] + if parts[0] == '': + del parts[0] + path = ".".join(parts) + if self.index is not -1: + path = "{0}[{1}]".format(path, self.index) + return path + + def __repr__(self): + return "{}({!r}, {!r}, {!r})".format( + self.__class__.__name__, + self.module, + self.resource_type, + self.name) + +def group_by_module(resources): + """ + Group a set of resources according to their containing module. + """ + + groups = {} + for resource in resources: + if resource.module in groups: + groups[resource.module].append(resource) + else: + groups[resource.module] = [resource] + + return [ + TerraformModule(name, contained) + for name, contained in groups.items() + ] + + +def read_state(statefile=None): + """ + Read the terraform state at the given path. + """ + argv = ["terraform", "state", "list"] + result = subprocess.run(argv, + capture_output=True, + check=True, + encoding='utf-8') + elements = result.stdout.split("\n") + elements.pop() + return elements + + +def state_changes_for_module(module, statefile=None): + """ + Compute the Terraform state changes (deletions and moves) for a single + module. + """ + commands = [] + + migration = ModuleMigration(module) + + for (old, new) in migration.moves(): + wrapper = '"{0}"' + argv = ["terraform", "state", "mv", wrapper.format(old), wrapper.format(new)] + commands.append(argv) + + return commands + + +def migrate(statefile=None, dryrun=False): + """ + Migrate the terraform state in `statefile` to match the post-refactor + resource structure. + """ + + # Generate a list of Terraform resource states from the output of + # `terraform state list` + resources = [ + TerraformResource.from_path(path) + for path in read_state(statefile) + ] + + # Group resources based on the module where they're defined. + modules = group_by_module(resources) + + # Filter our list of Terraform modules down to anything that looks like a + # zonal GKE module. 
We key this off the presence of + # `google_container_cluster.zonal_primary` since that should almost always be + # unique to a GKE module. + modules_to_migrate = [ + module for module in modules + if module.has_resource("google_container_cluster", "zonal_primary") + ] + + print("---- Migrating the following modules:") + for module in modules_to_migrate: + print("-- " + module.name) + + # Collect a list of resources for each module + commands = [] + for module in modules_to_migrate: + commands += state_changes_for_module(module, statefile) + + print("---- Commands to run:") + for argv in commands: + if dryrun: + print(" ".join(argv)) + else: + subprocess.run(argv, check=True, encoding='utf-8') + +def main(argv): + parser = argparser() + args = parser.parse_args(argv[1:]) + + # print("cp {} {}".format(args.oldstate, args.newstate)) + # shutil.copy(args.oldstate, args.newstate) + + migrate(dryrun=True) + +def argparser(): + parser = argparse.ArgumentParser(description='Migrate Terraform state') + # parser.add_argument('oldstate', metavar='oldstate.json', + # help='The current Terraform state (will not be ' + # 'modified)') + # parser.add_argument('newstate', metavar='newstate.json', + # help='The path to the new state file') + # parser.add_argument('--dryrun', action='store_true', + # help='Print the `terraform state mv` commands instead ' + # 'of running the commands.') + return parser + + +if __name__ == "__main__": + main(sys.argv) \ No newline at end of file From 4461904dc723731f4b9c41842f67f4eefbb3a74f Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Wed, 25 Sep 2019 18:00:48 -0400 Subject: [PATCH 23/82] Fix migration script --- helpers/migrate.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/helpers/migrate.py b/helpers/migrate.py index 0973b61d01..91161665c7 100755 --- a/helpers/migrate.py +++ b/helpers/migrate.py @@ -34,6 +34,13 @@ "rename": "pools", "module": "" }, + { + "resource_type": "null_resource", + 
"name": "wait_for_zonal_cluster", + "rename": "wait_for_cluster", + "module": "", + "plural": False + }, ] class ModuleMigration: @@ -60,6 +67,9 @@ def moves(self): if "rename" in migration: new.name = migration["rename"] + old.plural = migration.get("plural", True) + new.plural = migration.get("plural", True) + pair = (old.path(), new.path()) moves.append(pair) return moves @@ -177,7 +187,7 @@ def path(self): if parts[0] == '': del parts[0] path = ".".join(parts) - if self.index is not -1: + if self.index is not -1 and self.plural: path = "{0}[{1}]".format(path, self.index) return path @@ -276,6 +286,7 @@ def migrate(statefile=None, dryrun=False): if dryrun: print(" ".join(argv)) else: + argv = [arg.strip('"') for arg in argv] subprocess.run(argv, check=True, encoding='utf-8') def main(argv): @@ -285,7 +296,7 @@ def main(argv): # print("cp {} {}".format(args.oldstate, args.newstate)) # shutil.copy(args.oldstate, args.newstate) - migrate(dryrun=True) + migrate(dryrun=args.dryrun) def argparser(): parser = argparse.ArgumentParser(description='Migrate Terraform state') @@ -294,9 +305,9 @@ def argparser(): # 'modified)') # parser.add_argument('newstate', metavar='newstate.json', # help='The path to the new state file') - # parser.add_argument('--dryrun', action='store_true', - # help='Print the `terraform state mv` commands instead ' - # 'of running the commands.') + parser.add_argument('--dryrun', action='store_true', + help='Print the `terraform state mv` commands instead ' + 'of running the commands.') return parser From 5486fe97c6dc69e395e1ab3cf30655b4051ce28d Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Wed, 25 Sep 2019 18:05:40 -0400 Subject: [PATCH 24/82] Final v5.0.0 tweaks --- docs/upgrading_to_v5.0.md | 27 +++++++++++++++++++-------- helpers/migrate.py | 5 ----- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/docs/upgrading_to_v5.0.md b/docs/upgrading_to_v5.0.md index c3ad44ccdf..39abfbe8a5 100644 --- a/docs/upgrading_to_v5.0.md +++ 
b/docs/upgrading_to_v5.0.md @@ -46,19 +46,16 @@ If you are using regional clusters, no migration is needed. If you are using zon 1. Download the script - ``` + ```sh curl -O https://raw.githubusercontent.com/terraform-google-modules/terraform-google-kubernetes-engine/v5.0.0/helpers/migrate.py chmod +x migrate.py ``` -2. Execute the migration script +2. Run the script in dryrun mode to confirm the expected changes: - ``` - ./migrate.py - ``` + ```sh + $ ./migrate.py --dryrun - Output will be similar to the following: - ``` ---- Migrating the following modules: -- module.gke-cluster-dev.module.gke ---- Commands to run: @@ -66,6 +63,20 @@ If you are using regional clusters, no migration is needed. If you are using zon terraform state mv "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]" ``` -3. Execute the provided state migration commands (backups are automatically created). +3. Execute the migration script + + ```sh + $ ./migrate.py + + ---- Migrating the following modules: + -- module.gke-cluster-dev.module.gke + ---- Commands to run: + Move "module.gke-cluster-dev.module.gke.google_container_cluster.zonal_primary[0]" to "module.gke-cluster-dev.module.gke.google_container_cluster.primary[0]" + Successfully moved 1 object(s). + Move "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" to "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]" + Successfully moved 1 object(s). + Move "module.gke-cluster-dev.module.gke.null_resource.wait_for_zonal_cluster" to "module.gke-cluster-dev.module.gke.null_resource.wait_for_cluster" + Successfully moved 1 object(s). + ``` 4. Run `terraform plan` to confirm no changes are expected. 
diff --git a/helpers/migrate.py b/helpers/migrate.py index 91161665c7..a22a7ce751 100755 --- a/helpers/migrate.py +++ b/helpers/migrate.py @@ -300,11 +300,6 @@ def main(argv): def argparser(): parser = argparse.ArgumentParser(description='Migrate Terraform state') - # parser.add_argument('oldstate', metavar='oldstate.json', - # help='The current Terraform state (will not be ' - # 'modified)') - # parser.add_argument('newstate', metavar='newstate.json', - # help='The path to the new state file') parser.add_argument('--dryrun', action='store_true', help='Print the `terraform state mv` commands instead ' 'of running the commands.') From fc3fc6aeb97d3a076ed7699c45c05bb692ca8830 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Wed, 25 Sep 2019 18:09:50 -0400 Subject: [PATCH 25/82] Move old upgrading guide references to CHANGELOG --- CHANGELOG.md | 14 ++++++++++++++ README.md | 16 ---------------- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb8cf39f47..a6b5509726 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,8 @@ The v5.0.0 module requires using the [2.12 version](https://github.com/terraform * Supported version of Terraform is 0.12. [#177] ## [v3.0.0] - 2019-07-08 +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. ### Added @@ -88,6 +90,8 @@ The v5.0.0 module requires using the [2.12 version](https://github.com/terraform 2.3. [#148] ## [v2.0.0] - 2019-04-12 +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. ### Added @@ -119,6 +123,10 @@ The v5.0.0 module requires using the [2.12 version](https://github.com/terraform * Fix empty zone list. [#132] ## [v1.0.0] - 2019-03-25 +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. 
This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + ### Added * Allow creation of service accounts. [#80] * Add support for private clusters via submodule. [#69] @@ -251,3 +259,9 @@ The v5.0.0 module requires using the [2.12 version](https://github.com/terraform [#15]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/15 [#10]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/10 [#9]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/9 + +[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/README.md b/README.md index 923d3f7a09..afa0cac122 100644 --- a/README.md +++ b/README.md @@ -108,22 +108,6 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. 
Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - ## Inputs From 86523c4148048c6c5d977280fd6332cdbeb16308 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Wed, 25 Sep 2019 18:11:22 -0400 Subject: [PATCH 26/82] Tag v5.0.0 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6b5509726..9200790618 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] -## [v5.0.0] - 2019-XX-XX +## [v5.0.0] - 2019-09-25 v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). The v5.0.0 module requires using the [2.12 version](https://github.com/terraform-providers/terraform-provider-google/blob/master/CHANGELOG.md#2120-august-01-2019) of the Google provider. 
From f7d2a6cd34fd3eef0dac8b82b92fe4cc9b74be2a Mon Sep 17 00:00:00 2001 From: pp Date: Wed, 18 Sep 2019 15:22:39 +0300 Subject: [PATCH 27/82] Update integration tests to use new approach * Fix #245 * Fixed lint-tests * Backwards compatibility of the `generate` function * Disabled kitchen test `stub-domains-private-local` (issue #264) --- .dockerignore | 3 +- .kitchen.yml | 46 +- .ruby-version | 1 - CONTRIBUTING.md | 122 +++ Makefile | 215 ++---- README.md | 135 ---- autogen/README.md | 231 +++--- build/int.cloudbuild.yaml | 41 ++ build/lint.cloudbuild.yaml | 27 + helpers/terraform_docs | 694 ------------------ helpers/terraform_validate | 23 - modules/beta-private-cluster/README.md | 135 ---- modules/beta-public-cluster/README.md | 135 ---- modules/private-cluster/README.md | 135 ---- test/.gitignore | 1 + test/boilerplate/boilerplate.Dockerfile.txt | 13 - test/boilerplate/boilerplate.Makefile.txt | 13 - test/boilerplate/boilerplate.go.txt | 15 - test/boilerplate/boilerplate.py.txt | 13 - test/boilerplate/boilerplate.sh.txt | 13 - test/boilerplate/boilerplate.xml.txt | 15 - test/boilerplate/boilerplate.yaml.txt | 13 - test/ci/workload-metadata-config.yml | 3 +- test/ci_integration.sh | 70 -- test/fixtures/deploy_service/terraform.tfvars | 1 - .../disable_client_cert/terraform.tfvars | 1 - test/fixtures/node_pool/terraform.tfvars | 1 - test/fixtures/shared/terraform.tfvars | 0 test/fixtures/shared/terraform.tfvars.sample | 4 - test/fixtures/shared/variables.tf | 3 +- test/fixtures/shared_vpc/terraform.tfvars | 1 - .../fixtures/simple_regional/terraform.tfvars | 1 - .../simple_regional_private/terraform.tfvars | 1 - test/fixtures/simple_zonal/terraform.tfvars | 1 - .../simple_zonal_private/terraform.tfvars | 1 - test/fixtures/stub_domains/terraform.tfvars | 1 - .../stub_domains_private/terraform.tfvars | 1 - .../terraform.tfvars | 1 - .../upstream_nameservers/terraform.tfvars | 1 - .../workload_metadata_config/terraform.tfvars | 1 - 
.../deploy_service/controls/gcloud.rb | 4 +- .../deploy_service/controls/kubectl.rb | 4 +- .../disable_client_cert/controls/gcloud.rb | 4 +- test/integration/node_pool/controls/gcloud.rb | 4 +- .../integration/node_pool/controls/kubectl.rb | 4 +- .../integration/shared_vpc/controls/gcloud.rb | 4 +- .../simple_regional/controls/gcloud.rb | 4 +- .../controls/gcloud.rb | 4 +- .../simple_zonal/controls/gcloud.rb | 4 +- test/integration/simple_zonal/controls/gcp.rb | 4 +- test/integration/simple_zonal/inspec.yml | 3 +- .../simple_zonal_private/controls/gcloud.rb | 4 +- .../stub_domains/controls/gcloud.rb | 4 +- .../stub_domains/controls/kubectl.rb | 4 +- .../stub_domains_private/controls/gcloud.rb | 4 +- .../stub_domains_private/controls/kubectl.rb | 4 +- .../controls/gcloud.rb | 4 +- .../controls/kubectl.rb | 4 +- .../upstream_nameservers/controls/gcloud.rb | 4 +- .../upstream_nameservers/controls/kubectl.rb | 4 +- .../controls/gcloud.rb | 4 +- test/make.sh | 225 ------ test/setup/.gitignore | 2 + test/setup/iam.tf | 58 ++ test/setup/main.tf | 41 ++ Gemfile => test/setup/make_source.sh | 20 +- test/setup/outputs.tf | 28 + test/setup/variables.tf | 26 + .../boilerplate.tf.txt => setup/versions.tf} | 14 +- test/task_helper_functions.sh | 64 ++ test/test_verify_boilerplate.py | 136 ---- test/verify_boilerplate.py | 279 ------- 72 files changed, 670 insertions(+), 2438 deletions(-) delete mode 100644 .ruby-version create mode 100644 CONTRIBUTING.md create mode 100644 build/int.cloudbuild.yaml create mode 100644 build/lint.cloudbuild.yaml delete mode 100755 helpers/terraform_docs delete mode 100755 helpers/terraform_validate create mode 100644 test/.gitignore delete mode 100644 test/boilerplate/boilerplate.Dockerfile.txt delete mode 100644 test/boilerplate/boilerplate.Makefile.txt delete mode 100644 test/boilerplate/boilerplate.go.txt delete mode 100644 test/boilerplate/boilerplate.py.txt delete mode 100644 test/boilerplate/boilerplate.sh.txt delete mode 100644 
test/boilerplate/boilerplate.xml.txt delete mode 100644 test/boilerplate/boilerplate.yaml.txt delete mode 100755 test/ci_integration.sh delete mode 120000 test/fixtures/deploy_service/terraform.tfvars delete mode 120000 test/fixtures/disable_client_cert/terraform.tfvars delete mode 120000 test/fixtures/node_pool/terraform.tfvars delete mode 100644 test/fixtures/shared/terraform.tfvars delete mode 100644 test/fixtures/shared/terraform.tfvars.sample delete mode 120000 test/fixtures/shared_vpc/terraform.tfvars delete mode 120000 test/fixtures/simple_regional/terraform.tfvars delete mode 120000 test/fixtures/simple_regional_private/terraform.tfvars delete mode 120000 test/fixtures/simple_zonal/terraform.tfvars delete mode 120000 test/fixtures/simple_zonal_private/terraform.tfvars delete mode 120000 test/fixtures/stub_domains/terraform.tfvars delete mode 120000 test/fixtures/stub_domains_private/terraform.tfvars delete mode 120000 test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars delete mode 120000 test/fixtures/upstream_nameservers/terraform.tfvars delete mode 120000 test/fixtures/workload_metadata_config/terraform.tfvars delete mode 100755 test/make.sh create mode 100644 test/setup/.gitignore create mode 100644 test/setup/iam.tf create mode 100644 test/setup/main.tf rename Gemfile => test/setup/make_source.sh (50%) mode change 100644 => 100755 create mode 100644 test/setup/outputs.tf create mode 100644 test/setup/variables.tf rename test/{boilerplate/boilerplate.tf.txt => setup/versions.tf} (76%) create mode 100755 test/task_helper_functions.sh delete mode 100755 test/test_verify_boilerplate.py delete mode 100644 test/verify_boilerplate.py diff --git a/.dockerignore b/.dockerignore index 5e7ebff64e..ab75c635fe 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,4 +7,5 @@ test/fixtures/*/.terraform test/fixtures/*/terraform.tfstate.d examples/.kitchen examples/*/.terraform -examples/*/terraform.tfstate.d \ No newline at end of file 
+examples/*/terraform.tfstate.d + diff --git a/.kitchen.yml b/.kitchen.yml index 6bf414c21f..9f5df5a03e 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -29,13 +29,15 @@ platforms: - name: local suites: - - name: "deploy_service" - driver: - root_module_directory: test/fixtures/deploy_service - verifier: - systems: - - name: deploy_service - backend: local +# Disabled due to issue #274 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/274) +# - name: "deploy_service" +# driver: +# root_module_directory: test/fixtures/deploy_service +# verifier: +# systems: +# - name: deploy_service +# backend: local - name: "disable_client_cert" driver: root_module_directory: test/fixtures/disable_client_cert @@ -43,13 +45,15 @@ suites: systems: - name: disable_client_cert backend: local - - name: "node_pool" - driver: - root_module_directory: test/fixtures/node_pool - verifier: - systems: - - name: node_pool - backend: local +# Disabled due to issue #274 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/274) +# - name: "node_pool" +# driver: +# root_module_directory: test/fixtures/node_pool +# verifier: +# systems: +# - name: node_pool +# backend: local - name: "shared_vpc" driver: root_module_directory: test/fixtures/shared_vpc @@ -98,12 +102,14 @@ suites: systems: - name: stub_domains backend: local - - name: stub_domains_private - driver: - root_module_directory: test/fixtures/stub_domains_private - systems: - - name: stub_domains_private - backend: local +# Disabled due to issue #264 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/264) +# - name: stub_domains_private +# driver: +# root_module_directory: test/fixtures/stub_domains_private +# systems: +# - name: stub_domains_private +# backend: local - name: "upstream_nameservers" driver: root_module_directory: test/fixtures/upstream_nameservers diff --git a/.ruby-version b/.ruby-version deleted file mode 
100644 index aedc15bb0c..0000000000 --- a/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -2.5.3 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..cd4943578a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,122 @@ +# Contributing + +This document provides guidelines for contributing to the module. + +## Dependencies + +The following dependencies must be installed on the development system: + +- [Docker Engine][docker-engine] +- [Google Cloud SDK][google-cloud-sdk] +- [make] + +## Generating Documentation for Inputs and Outputs + +The Inputs and Outputs tables in the READMEs of the root module, +submodules, and example modules are automatically generated based on +the `variables` and `outputs` of the respective modules. These tables +must be refreshed if the module interfaces are changed. + +## Templating + +To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. + +The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. + +Note: The correct sequence to update the repo using autogen functionality is to run +`make docker_generate && make docker_generate_docs`. This will create the various Terraform files, and then +generate the Terraform documentation using `terraform-docs`. + +### Autogeneration of documentation from .tf files +To generate new Inputs and Outputs tables run +``` +make docker_generate_docs +``` + +## Integration Testing + +Integration tests are used to verify the behaviour of the root module, +submodules, and example modules. Additions, changes, and fixes should +be accompanied with tests. 
+ +The integration tests are run using [Kitchen][kitchen], +[Kitchen-Terraform][kitchen-terraform], and [InSpec][inspec]. These +tools are packaged within a Docker image for convenience. + +The general strategy for these tests is to verify the behaviour of the +[example modules](./examples/), thus ensuring that the root module, +submodules, and example modules are all functionally correct. + +Six test-kitchen instances are defined: + +- `deploy-service` +- `node-pool` +- `shared-vpc` +- `simple-regional` +- `simple-zonal` +- `stub-domains` + +The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory.` + +### Test Environment +The easiest way to test the module is in an isolated test project. The setup for such a project is defined in [test/setup](./test/setup/) directory. + +To use this setup, you need a service account with Project Creator access on a folder. Export the Service Account credentials to your environment like so: + +``` +export SERVICE_ACCOUNT_JSON=$(< credentials.json) +``` + +You will also need to set a few environment variables: +``` +export TF_VAR_org_id="your_org_id" +export TF_VAR_folder_id="your_folder_id" +export TF_VAR_billing_account="your_billing_account_id" +``` + +With these settings in place, you can prepare a test project using Docker: +``` +make docker_test_prepare +``` + +### Noninteractive Execution + +Run `make docker_test_integration` to test all of the example modules +noninteractively, using the prepared test project. + +### Interactive Execution + +1. Run `make docker_run` to start the testing Docker container in + interactive mode. + +1. Run `kitchen_do create ` to initialize the working + directory for an example module. + +1. Run `kitchen_do converge ` to apply the example module. + +1. Run `kitchen_do verify ` to test the example module. + +1. Run `kitchen_do destroy ` to destroy the example module + state. 
+ +## Linting and Formatting + +Many of the files in the repository can be linted or formatted to +maintain a standard of quality. + +### Execution + +Run `make docker_test_lint`. + +[docker-engine]: https://www.docker.com/products/docker-engine +[flake8]: http://flake8.pycqa.org/en/latest/ +[gofmt]: https://golang.org/cmd/gofmt/ +[google-cloud-sdk]: https://cloud.google.com/sdk/install +[hadolint]: https://github.com/hadolint/hadolint +[inspec]: https://inspec.io/ +[kitchen-terraform]: https://github.com/newcontext-oss/kitchen-terraform +[kitchen]: https://kitchen.ci/ +[make]: https://en.wikipedia.org/wiki/Make_(software) +[shellcheck]: https://www.shellcheck.net/ +[terraform-docs]: https://github.com/segmentio/terraform-docs +[terraform]: https://terraform.io/ diff --git a/Makefile b/Makefile index 21d7a2764f..5039822a75 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,172 +12,85 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). +# Please make sure to contribute relevant changes upstream! 
+ # Make will use bash instead of sh SHELL := /usr/bin/env bash -# Docker build config variables -CREDENTIALS_PATH ?= /cft/workdir/credentials.json -DOCKER_ORG := gcr.io/cloud-foundation-cicd -DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.3.0 -DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} - -# All is the first target in the file so it will get picked up when you just run 'make' on its own -.PHONY: all -all: check generate_docs - -.PHONY: check -check: check_shell check_python check_golang check_terraform check_base_files test_check_headers check_headers check_trailing_whitespace check_generate check_generate_docs - -# The .PHONY directive tells make that this isn't a real target and so -# the presence of a file named 'check_shell' won't cause this target to stop -# working -.PHONY: check_shell -check_shell: - @source test/make.sh && check_shell - -.PHONY: check_python -check_python: - @source test/make.sh && check_python - -.PHONY: check_golang -check_golang: - @source test/make.sh && golang - -.PHONY: check_terraform -check_terraform: - @source test/make.sh && check_terraform - -.PHONY: check_base_files -check_base_files: - @source test/make.sh && basefiles +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.1.0 +DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools +REGISTRY_URL := gcr.io/cloud-foundation-cicd -.PHONY: check_shebangs -check_shebangs: - @source test/make.sh && check_bash - -.PHONY: check_trailing_whitespace -check_trailing_whitespace: - @source test/make.sh && check_trailing_whitespace - -.PHONY: test_check_headers -test_check_headers: - @echo "Testing the validity of the header check" - @python test/test_verify_boilerplate.py - -.PHONY: check_headers -check_headers: - @echo "Checking file headers" - @python test/verify_boilerplate.py - -.PHONY: check_generate -check_generate: ## Check that `make generate` does not generate a diff - @source test/make.sh && check_generate - -.PHONY: check_generate_docs 
-check_generate_docs: ## Check that `make generate_docs` does not generate a diff - @source test/make.sh && check_generate_docs - -# Integration tests -.PHONY: test_integration -test_integration: - test/ci_integration.sh - -.PHONY: generate_docs -generate_docs: - @source test/make.sh && generate_docs - -.PHONY: generate -generate: - @source test/make.sh && generate - -.PHONY: dev -dev: generate generate_docs - @echo "Updated files" - -# Versioning -.PHONY: version -version: - @source helpers/version-repo.sh - -# Run docker +# Enter docker container for local development .PHONY: docker_run docker_run: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && exec /bin/bash" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -.PHONY: docker_create -docker_create: docker_build_kitchen_terraform +# Execute prepare tests within the docker container +.PHONY: docker_test_prepare +docker_test_prepare: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen create" - -.PHONY: docker_converge -docker_converge: + -e TF_VAR_org_id \ + -e TF_VAR_folder_id \ + -e TF_VAR_billing_account \ + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + 
/usr/local/bin/execute_with_credentials.sh prepare_environment + +# Clean up test environment within the docker container +.PHONY: docker_test_cleanup +docker_test_cleanup: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen converge && kitchen converge" - -.PHONY: docker_verify -docker_verify: + -e TF_VAR_org_id \ + -e TF_VAR_folder_id \ + -e TF_VAR_billing_account \ + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/execute_with_credentials.sh cleanup_environment + +# Execute integration tests within the docker container +.PHONY: docker_test_integration +docker_test_integration: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen verify" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/test_integration.sh -.PHONY: docker_destroy -docker_destroy: +# Execute lint tests within the docker container +.PHONY: docker_test_lint +docker_test_lint: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ - -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v 
"$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen destroy" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/test_lint.sh -.PHONY: test_integration_docker -test_integration_docker: +# Generate documentation +.PHONY: docker_generate_docs +docker_generate_docs: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ - -e SERVICE_ACCOUNT_JSON \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "test/ci_integration.sh" + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs' + +# Generate files from autogen +.PHONY: docker_generate +docker_generate: + docker run --rm -it \ + -v $(CURDIR):/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate' + +# Alias for backwards compatibility +.PHONY: generate_docs +generate_docs: docker_generate_docs + +.PHONY: generate +generate: docker_generate diff --git a/README.md b/README.md index 923d3f7a09..f879d0f7dc 100644 --- a/README.md +++ b/README.md @@ -248,141 +248,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. 
Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. 
`kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. 
-Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: docs/upgrading_to_v3.0.md diff --git a/autogen/README.md b/autogen/README.md index 73a6314289..421e4a2605 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -139,6 +139,102 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. 
Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). 
| bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. 
Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| +| type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | +| zones | List of zones in which the cluster resides | + ## Requirements @@ -193,141 +289,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. 
Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. 
You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. 
Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew {% if private_cluster %} [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml new file mode 100644 index 0000000000..85139efe7d --- /dev/null +++ b/build/int.cloudbuild.yaml @@ -0,0 +1,41 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +timeout: 12600s +steps: +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] +- id: converge + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge'] +- id: verify + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify'] +- id: destroy + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.1.0' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml new file mode 100644 index 0000000000..3b7306297c --- /dev/null +++ b/build/lint.cloudbuild.yaml @@ -0,0 +1,27 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +steps: +- id: 'lint-generation' + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && check_generate'] +- id: 'lint-tests' + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/usr/local/bin/test_lint.sh'] +tags: +- 'ci' +- 'lint' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.1.0' diff --git a/helpers/terraform_docs b/helpers/terraform_docs deleted file mode 100755 index c33230959b..0000000000 --- a/helpers/terraform_docs +++ /dev/null @@ -1,694 +0,0 @@ -#!/usr/bin/env bash - -set -e - -main() { - declare argv - argv=$(getopt -o a: --long args: -- "$@") || return - eval "set -- $argv" - - declare args - declare files - - for argv; do - case $argv in - (-a|--args) - shift - args="$1" - shift - ;; - (--) - shift - files="$@" - break - ;; - esac - done - - local hack_terraform_docs=$(terraform version | head -1 | grep -c 0.12) - - if [[ "$hack_terraform_docs" == "1" ]]; then - which awk 2>&1 >/dev/null || ( echo "awk is required for terraform-docs hack to work with Terraform 0.12"; exit 1) - - tmp_file_awk=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") - terraform_docs_awk "$tmp_file_awk" - terraform_docs "$tmp_file_awk" "$args" "$files" - rm -f "$tmp_file_awk" - else - terraform_docs "0" "$args" "$files" - fi - -} - -terraform_docs() { - readonly terraform_docs_awk_file="$1" - readonly 
args="$2" - readonly files="$3" - - declare -a paths - declare -a tfvars_files - - index=0 - - for file_with_path in $files; do - file_with_path="${file_with_path// /__REPLACED__SPACE__}" - - paths[index]=$(dirname "$file_with_path") - - if [[ "$file_with_path" == *".tfvars" ]]; then - tfvars_files+=("$file_with_path") - fi - - ((index+=1)) - done - - readonly tmp_file=$(mktemp) - readonly text_file="README.md" - - for path_uniq in $(echo "${paths[*]}" | tr ' ' '\n' | sort -u); do - path_uniq="${path_uniq//__REPLACED__SPACE__/ }" - - pushd "$path_uniq" > /dev/null - - if [[ ! -f "$text_file" ]]; then - popd > /dev/null - continue - fi - - if [[ "$terraform_docs_awk_file" == "0" ]]; then - terraform-docs $args md ./ > "$tmp_file" - else - # Can't append extension for mktemp, so renaming instead - tmp_file_docs=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") - mv "$tmp_file_docs" "$tmp_file_docs.tf" - tmp_file_docs_tf="$tmp_file_docs.tf" - - awk -f "$terraform_docs_awk_file" ./*.tf > "$tmp_file_docs_tf" - terraform-docs $args md "$tmp_file_docs_tf" > "$tmp_file" - rm -f "$tmp_file_docs_tf" - fi - - # Replace content between markers with the placeholder - https://stackoverflow.com/questions/1212799/how-do-i-extract-lines-between-two-line-delimiters-in-perl#1212834 - perl -i -ne 'if (/BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/../END OF PRE-COMMIT-TERRAFORM DOCS HOOK/) { print $_ if /BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/; print "I_WANT_TO_BE_REPLACED\n$_" if /END OF PRE-COMMIT-TERRAFORM DOCS HOOK/;} else { print $_ }' "$text_file" - - # Replace placeholder with the content of the file - perl -i -e 'open(F, "'"$tmp_file"'"); $f = join "", ; while(<>){if (/I_WANT_TO_BE_REPLACED/) {print $f} else {print $_};}' "$text_file" - - rm -f "$tmp_file" - - popd > /dev/null - done -} - -terraform_docs_awk() { - readonly output_file=$1 - - cat <<"EOF" > $output_file -# This script converts Terraform 0.12 variables/outputs to something suitable for `terraform-docs` -# 
As of terraform-docs v0.6.0, HCL2 is not supported. This script is a *dirty hack* to get around it. -# https://github.com/segmentio/terraform-docs/ -# https://github.com/segmentio/terraform-docs/issues/62 - -# Script was originally found here: https://github.com/cloudposse/build-harness/blob/master/bin/terraform-docs.awk - -{ - if ( $0 ~ /\{/ ) { - braceCnt++ - } - - if ( $0 ~ /\}/ ) { - braceCnt-- - } - - # [START] variable or output block started - if ($0 ~ /^[[:space:]]*(variable|output)[[:space:]][[:space:]]*"(.*?)"/) { - # Normalize the braceCnt (should be 1 now) - braceCnt = 1 - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - blockCnt++ - print $0 - } - - # [START] multiline default statement started - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*(default)[[:space:]][[:space:]]*=/) { - if ($3 ~ "null") { - print " default = \"null\"" - } else { - print $0 - blockDefCnt++ - blockDefStart=1 - } - } - } - - # [PRINT] single line "description" - if (blockCnt > 0) { - if (blockDefCnt == 0) { - if ($0 ~ /^[[:space:]][[:space:]]*description[[:space:]][[:space:]]*=/) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - print $0 - } - } - } - - # [PRINT] single line "type" - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*type[[:space:]][[:space:]]*=/ ) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - type=$3 - if (type ~ "object") { - print " type = \"object\"" - } else { - # legacy quoted types: "string", "list", and "map" - if ($3 ~ /^[[:space:]]*"(.*?)"[[:space:]]*$/) { - print " type = " $3 - } else { - print " type = \"" $3 "\"" - } - } - } - } - - # [CLOSE] variable/output block - if (blockCnt > 0) { - if (braceCnt == 0 && blockCnt > 0) { - blockCnt-- - print $0 - } - } - - # [PRINT] Multiline "default" statement - if (blockCnt > 0 && blockDefCnt > 0) { - if (blockDefStart == 1) { - blockDefStart = 0 - } else { - print $0 - } - } -} -EOF - -} - -getopt() { - # 
pure-getopt, a drop-in replacement for GNU getopt in pure Bash. - # version 1.4.3 - # - # Copyright 2012-2018 Aron Griffis - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be included - # in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - _getopt_main() { - # Returns one of the following statuses: - # 0 success - # 1 error parsing parameters - # 2 error in getopt invocation - # 3 internal error - # 4 reserved for -T - # - # For statuses 0 and 1, generates normalized and shell-quoted - # "options -- parameters" on stdout. - - declare parsed status - declare short long name flags - declare have_short=false - - # Synopsis from getopt man-page: - # - # getopt optstring parameters - # getopt [options] [--] optstring parameters - # getopt [options] -o|--options optstring [options] [--] parameters - # - # The first form can be normalized to the third form which - # _getopt_parse() understands. The second form can be recognized after - # first parse when $short hasn't been set. 
- - if [[ -n ${GETOPT_COMPATIBLE+isset} || $1 == [^-]* ]]; then - # Enable compatibility mode - flags=c$flags - # Normalize first to third synopsis form - set -- -o "$1" -- "${@:2}" - fi - - # First parse always uses flags=p since getopt always parses its own - # arguments effectively in this mode. - parsed=$(_getopt_parse getopt ahl:n:o:qQs:TuV \ - alternative,help,longoptions:,name:,options:,quiet,quiet-output,shell:,test,version \ - p "$@") - status=$? - if [[ $status != 0 ]]; then - if [[ $status == 1 ]]; then - echo "Try \`getopt --help' for more information." >&2 - # Since this is the first parse, convert status 1 to 2 - status=2 - fi - return $status - fi - eval "set -- $parsed" - - while [[ $# -gt 0 ]]; do - case $1 in - (-a|--alternative) - flags=a$flags ;; - - (-h|--help) - _getopt_help - return 2 # as does GNU getopt - ;; - - (-l|--longoptions) - long="$long${long:+,}$2" - shift ;; - - (-n|--name) - name=$2 - shift ;; - - (-o|--options) - short=$2 - have_short=true - shift ;; - - (-q|--quiet) - flags=q$flags ;; - - (-Q|--quiet-output) - flags=Q$flags ;; - - (-s|--shell) - case $2 in - (sh|bash) - flags=${flags//t/} ;; - (csh|tcsh) - flags=t$flags ;; - (*) - echo 'getopt: unknown shell after -s or --shell argument' >&2 - echo "Try \`getopt --help' for more information." >&2 - return 2 ;; - esac - shift ;; - - (-u|--unquoted) - flags=u$flags ;; - - (-T|--test) - return 4 ;; - - (-V|--version) - echo "pure-getopt 1.4.3" - return 0 ;; - - (--) - shift - break ;; - esac - - shift - done - - if ! $have_short; then - # $short was declared but never set, not even to an empty string. - # This implies the second form in the synopsis. - if [[ $# == 0 ]]; then - echo 'getopt: missing optstring argument' >&2 - echo "Try \`getopt --help' for more information." 
>&2 - return 2 - fi - short=$1 - have_short=true - shift - fi - - if [[ $short == -* ]]; then - # Leading dash means generate output in place rather than reordering, - # unless we're already in compatibility mode. - [[ $flags == *c* ]] || flags=i$flags - short=${short#?} - elif [[ $short == +* ]]; then - # Leading plus means POSIXLY_CORRECT, unless we're already in - # compatibility mode. - [[ $flags == *c* ]] || flags=p$flags - short=${short#?} - fi - - # This should fire if POSIXLY_CORRECT is in the environment, even if - # it's an empty string. That's the difference between :+ and + - flags=${POSIXLY_CORRECT+p}$flags - - _getopt_parse "${name:-getopt}" "$short" "$long" "$flags" "$@" - } - - _getopt_parse() { - # Inner getopt parser, used for both first parse and second parse. - # Returns 0 for success, 1 for error parsing, 3 for internal error. - # In the case of status 1, still generates stdout with whatever could - # be parsed. - # - # $flags is a string of characters with the following meanings: - # a - alternative parsing mode - # c - GETOPT_COMPATIBLE - # i - generate output in place rather than reordering - # p - POSIXLY_CORRECT - # q - disable error reporting - # Q - disable normal output - # t - quote for csh/tcsh - # u - unquoted output - - declare name="$1" short="$2" long="$3" flags="$4" - shift 4 - - # Split $long on commas, prepend double-dashes, strip colons; - # for use with _getopt_resolve_abbrev - declare -a longarr - _getopt_split longarr "$long" - longarr=( "${longarr[@]/#/--}" ) - longarr=( "${longarr[@]%:}" ) - longarr=( "${longarr[@]%:}" ) - - # Parse and collect options and parameters - declare -a opts params - declare o alt_recycled=false error=0 - - while [[ $# -gt 0 ]]; do - case $1 in - (--) - params=( "${params[@]}" "${@:2}" ) - break ;; - - (--*=*) - o=${1%%=*} - if ! 
o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then - error=1 - elif [[ ,"$long", == *,"${o#--}"::,* ]]; then - opts=( "${opts[@]}" "$o" "${1#*=}" ) - elif [[ ,"$long", == *,"${o#--}":,* ]]; then - opts=( "${opts[@]}" "$o" "${1#*=}" ) - elif [[ ,"$long", == *,"${o#--}",* ]]; then - if $alt_recycled; then o=${o#-}; fi - _getopt_err "$name: option '$o' doesn't allow an argument" - error=1 - else - echo "getopt: assertion failed (1)" >&2 - return 3 - fi - alt_recycled=false - ;; - - (--?*) - o=$1 - if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then - error=1 - elif [[ ,"$long", == *,"${o#--}",* ]]; then - opts=( "${opts[@]}" "$o" ) - elif [[ ,"$long", == *,"${o#--}::",* ]]; then - opts=( "${opts[@]}" "$o" '' ) - elif [[ ,"$long", == *,"${o#--}:",* ]]; then - if [[ $# -ge 2 ]]; then - shift - opts=( "${opts[@]}" "$o" "$1" ) - else - if $alt_recycled; then o=${o#-}; fi - _getopt_err "$name: option '$o' requires an argument" - error=1 - fi - else - echo "getopt: assertion failed (2)" >&2 - return 3 - fi - alt_recycled=false - ;; - - (-*) - if [[ $flags == *a* ]]; then - # Alternative parsing mode! - # Try to handle as a long option if any of the following apply: - # 1. There's an equals sign in the mix -x=3 or -xy=3 - # 2. There's 2+ letters and an abbreviated long match -xy - # 3. There's a single letter and an exact long match - # 4. There's a single letter and no short match - o=${1::2} # temp for testing #4 - if [[ $1 == *=* || $1 == -?? || \ - ,$long, == *,"${1#-}"[:,]* || \ - ,$short, != *,"${o#-}"[:,]* ]]; then - o=$(_getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" 2>/dev/null) - case $? in - (0) - # Unambiguous match. Let the long options parser handle - # it, with a flag to get the right error message. - set -- "-$1" "${@:2}" - alt_recycled=true - continue ;; - (1) - # Ambiguous match, generate error and continue. 
- _getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" >/dev/null - error=1 - shift - continue ;; - (2) - # No match, fall through to single-character check. - true ;; - (*) - echo "getopt: assertion failed (3)" >&2 - return 3 ;; - esac - fi - fi - - o=${1::2} - if [[ "$short" == *"${o#-}"::* ]]; then - if [[ ${#1} -gt 2 ]]; then - opts=( "${opts[@]}" "$o" "${1:2}" ) - else - opts=( "${opts[@]}" "$o" '' ) - fi - elif [[ "$short" == *"${o#-}":* ]]; then - if [[ ${#1} -gt 2 ]]; then - opts=( "${opts[@]}" "$o" "${1:2}" ) - elif [[ $# -ge 2 ]]; then - shift - opts=( "${opts[@]}" "$o" "$1" ) - else - _getopt_err "$name: option requires an argument -- '${o#-}'" - error=1 - fi - elif [[ "$short" == *"${o#-}"* ]]; then - opts=( "${opts[@]}" "$o" ) - if [[ ${#1} -gt 2 ]]; then - set -- "$o" "-${1:2}" "${@:2}" - fi - else - if [[ $flags == *a* ]]; then - # Alternative parsing mode! Report on the entire failed - # option. GNU includes =value but we omit it for sanity with - # very long values. - _getopt_err "$name: unrecognized option '${1%%=*}'" - else - _getopt_err "$name: invalid option -- '${o#-}'" - if [[ ${#1} -gt 2 ]]; then - set -- "$o" "-${1:2}" "${@:2}" - fi - fi - error=1 - fi ;; - - (*) - # GNU getopt in-place mode (leading dash on short options) - # overrides POSIXLY_CORRECT - if [[ $flags == *i* ]]; then - opts=( "${opts[@]}" "$1" ) - elif [[ $flags == *p* ]]; then - params=( "${params[@]}" "$@" ) - break - else - params=( "${params[@]}" "$1" ) - fi - esac - - shift - done - - if [[ $flags == *Q* ]]; then - true # generate no output - else - echo -n ' ' - if [[ $flags == *[cu]* ]]; then - printf '%s -- %s' "${opts[*]}" "${params[*]}" - else - if [[ $flags == *t* ]]; then - _getopt_quote_csh "${opts[@]}" -- "${params[@]}" - else - _getopt_quote "${opts[@]}" -- "${params[@]}" - fi - fi - echo - fi - - return $error - } - - _getopt_err() { - if [[ $flags != *q* ]]; then - printf '%s\n' "$1" >&2 - fi - } - - _getopt_resolve_abbrev() { - # Resolves an abbrevation from a 
list of possibilities. - # If the abbreviation is unambiguous, echoes the expansion on stdout - # and returns 0. If the abbreviation is ambiguous, prints a message on - # stderr and returns 1. (For first parse this should convert to exit - # status 2.) If there is no match at all, prints a message on stderr - # and returns 2. - declare a q="$1" - declare -a matches - shift - for a; do - if [[ $q == "$a" ]]; then - # Exact match. Squash any other partial matches. - matches=( "$a" ) - break - elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q" ]]; then - # Exact alternative match. Squash any other partial matches. - matches=( "$a" ) - break - elif [[ $a == "$q"* ]]; then - # Abbreviated match. - matches=( "${matches[@]}" "$a" ) - elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q"* ]]; then - # Abbreviated alternative match. - matches=( "${matches[@]}" "${a#-}" ) - fi - done - case ${#matches[@]} in - (0) - [[ $flags == *q* ]] || \ - printf "$name: unrecognized option %s\\n" >&2 \ - "$(_getopt_quote "$q")" - return 2 ;; - (1) - printf '%s' "${matches[0]}"; return 0 ;; - (*) - [[ $flags == *q* ]] || \ - printf "$name: option %s is ambiguous; possibilities: %s\\n" >&2 \ - "$(_getopt_quote "$q")" "$(_getopt_quote "${matches[@]}")" - return 1 ;; - esac - } - - _getopt_split() { - # Splits $2 at commas to build array specified by $1 - declare IFS=, - eval "$1=( \$2 )" - } - - _getopt_quote() { - # Quotes arguments with single quotes, escaping inner single quotes - declare s space q=\' - for s; do - printf "$space'%s'" "${s//$q/$q\\$q$q}" - space=' ' - done - } - - _getopt_quote_csh() { - # Quotes arguments with single quotes, escaping inner single quotes, - # bangs, backslashes and newlines - declare s i c space - for s; do - echo -n "$space'" - for ((i=0; i<${#s}; i++)); do - c=${s:i:1} - case $c in - (\\|\'|!) 
- echo -n "'\\$c'" ;; - ($'\n') - echo -n "\\$c" ;; - (*) - echo -n "$c" ;; - esac - done - echo -n \' - space=' ' - done - } - - _getopt_help() { - cat <<-EOT >&2 - - Usage: - getopt - getopt [options] [--] - getopt [options] -o|--options [options] [--] - - Parse command options. - - Options: - -a, --alternative allow long options starting with single - - -l, --longoptions the long options to be recognized - -n, --name the name under which errors are reported - -o, --options the short options to be recognized - -q, --quiet disable error reporting by getopt(3) - -Q, --quiet-output no normal output - -s, --shell set quoting conventions to those of - -T, --test test for getopt(1) version - -u, --unquoted do not quote the output - - -h, --help display this help and exit - -V, --version output version information and exit - - For more details see getopt(1). - EOT - } - - _getopt_version_check() { - if [[ -z $BASH_VERSION ]]; then - echo "getopt: unknown version of bash might not be compatible" >&2 - return 1 - fi - - # This is a lexical comparison that should be sufficient forever. - if [[ $BASH_VERSION < 2.05b ]]; then - echo "getopt: bash $BASH_VERSION might not be compatible" >&2 - return 1 - fi - - return 0 - } - - _getopt_version_check - _getopt_main "$@" - declare status=$? - unset -f _getopt_main _getopt_err _getopt_parse _getopt_quote \ - _getopt_quote_csh _getopt_resolve_abbrev _getopt_split _getopt_help \ - _getopt_version_check - return $status -} - -[[ $BASH_SOURCE != "$0" ]] || main "$@" \ No newline at end of file diff --git a/helpers/terraform_validate b/helpers/terraform_validate deleted file mode 100755 index 0c284194ac..0000000000 --- a/helpers/terraform_validate +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -# -# Copyright 2019 Google LLC. This software is provided as-is, without warranty -# or representation for any use or purpose. Your use of it is subject to your -# agreement with Google. 
-# -# This script initializes modules so that terraform validate as of 0.12 behaves -# as expected and does not issue errors such as: -# -# Error: Module not installed -# -# on test/fixtures/shared_vpc_no_subnets/main.tf line 37: -# 37: module "project-factory" { -# -# This module is not yet installed. Run "terraform init" to install all modules -# required by this configuration. - -# The first and only argument to this script is the directory containing *.tf -# files to validate. This directory is assumed to be a root module. - -cd "$1" -terraform init -backend=false -terraform validate \ No newline at end of file diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 1175553890..988d48ead8 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -278,141 +278,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. 
- -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. 
(See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. 
- -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 1f78c95082..7d59e927bf 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -269,141 +269,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. 
[PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. 
Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. 
-For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index c29d58ee93..d823f640fa 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -257,141 +257,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. 
- -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. 
- -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. 
----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/test/.gitignore b/test/.gitignore new file mode 100644 index 0000000000..d69ba0d42f --- /dev/null +++ b/test/.gitignore @@ -0,0 +1 @@ +source.sh diff --git a/test/boilerplate/boilerplate.Dockerfile.txt b/test/boilerplate/boilerplate.Dockerfile.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.Dockerfile.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/test/boilerplate/boilerplate.Makefile.txt b/test/boilerplate/boilerplate.Makefile.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.Makefile.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/boilerplate/boilerplate.go.txt b/test/boilerplate/boilerplate.go.txt deleted file mode 100644 index 557e16f064..0000000000 --- a/test/boilerplate/boilerplate.go.txt +++ /dev/null @@ -1,15 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ diff --git a/test/boilerplate/boilerplate.py.txt b/test/boilerplate/boilerplate.py.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.py.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/boilerplate/boilerplate.sh.txt b/test/boilerplate/boilerplate.sh.txt deleted file mode 100644 index 2e94f3e551..0000000000 --- a/test/boilerplate/boilerplate.sh.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/boilerplate/boilerplate.xml.txt b/test/boilerplate/boilerplate.xml.txt deleted file mode 100644 index 3d98cdc6e5..0000000000 --- a/test/boilerplate/boilerplate.xml.txt +++ /dev/null @@ -1,15 +0,0 @@ - diff --git a/test/boilerplate/boilerplate.yaml.txt b/test/boilerplate/boilerplate.yaml.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.yaml.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/ci/workload-metadata-config.yml b/test/ci/workload-metadata-config.yml index 23874671db..231c8dfc3a 100644 --- a/test/ci/workload-metadata-config.yml +++ b/test/ci/workload-metadata-config.yml @@ -15,4 +15,5 @@ params: SUITE: "workload-metadata-config-local" COMPUTE_ENGINE_SERVICE_ACCOUNT: "" REGION: "us-east4" - ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' \ No newline at end of file + ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' + diff --git a/test/ci_integration.sh b/test/ci_integration.sh deleted file mode 100755 index 365ed3862e..0000000000 --- a/test/ci_integration.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Always clean up. 
-DELETE_AT_EXIT="$(mktemp -d)" -finish() { - echo 'BEGIN: finish() trap handler' >&2 - kitchen destroy "$SUITE" - [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" - echo 'END: finish() trap handler' >&2 -} - -# Map the input parameters provided by Concourse CI, or whatever mechanism is -# running the tests to Terraform input variables. Also setup credentials for -# use with kitchen-terraform, inspec, and gcloud. -setup_environment() { - local tmpfile - tmpfile="$(mktemp)" - echo "${SERVICE_ACCOUNT_JSON}" > "${tmpfile}" - - echo "${SERVICE_ACCOUNT_JSON}" > "test/fixtures/shared/credentials.json" - - # gcloud variables - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${tmpfile}" - # Application default credentials (Terraform google provider and inspec-gcp) - export GOOGLE_APPLICATION_CREDENTIALS="${tmpfile}" - - # Terraform variables - export TF_VAR_project_id="$PROJECT_ID" - export TF_VAR_credentials_path_relative="../shared/credentials.json" - export TF_VAR_region="$REGION" - export TF_VAR_zones="$ZONES" - export TF_VAR_compute_engine_service_account="$COMPUTE_ENGINE_SERVICE_ACCOUNT" -} - -main() { - export SUITE="${SUITE:-}" - - set -eu - # Setup trap handler to auto-cleanup - export TMPDIR="${DELETE_AT_EXIT}" - trap finish EXIT - - # Setup environment variables - setup_environment - set -x - - # Execute the test lifecycle - kitchen create "$SUITE" - kitchen converge "$SUITE" - kitchen verify "$SUITE" -} - -# if script is being executed and not sourced. 
-if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then - main "$@" -fi diff --git a/test/fixtures/deploy_service/terraform.tfvars b/test/fixtures/deploy_service/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/deploy_service/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/disable_client_cert/terraform.tfvars b/test/fixtures/disable_client_cert/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/disable_client_cert/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/node_pool/terraform.tfvars b/test/fixtures/node_pool/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/node_pool/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/shared/terraform.tfvars b/test/fixtures/shared/terraform.tfvars deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/fixtures/shared/terraform.tfvars.sample b/test/fixtures/shared/terraform.tfvars.sample deleted file mode 100644 index 3110e9b3d5..0000000000 --- a/test/fixtures/shared/terraform.tfvars.sample +++ /dev/null @@ -1,4 +0,0 @@ -project_id="" -region="us-east4" -zones=["us-east4-a","us-east4-b","us-east4-c"] -compute_engine_service_account="" diff --git a/test/fixtures/shared/variables.tf b/test/fixtures/shared/variables.tf index f8e3d6dfa4..76280e0065 100644 --- a/test/fixtures/shared/variables.tf +++ b/test/fixtures/shared/variables.tf @@ -20,12 +20,13 @@ variable "project_id" { variable "region" { description = "The GCP region to create and test resources in" + default = "us-east4" } variable "zones" { type = list(string) description = "The GCP zones to create and test resources in, for applicable tests" - default = [] + default = ["us-east4-a", "us-east4-b", "us-east4-c"] 
} variable "compute_engine_service_account" { diff --git a/test/fixtures/shared_vpc/terraform.tfvars b/test/fixtures/shared_vpc/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/shared_vpc/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional/terraform.tfvars b/test/fixtures/simple_regional/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_regional/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional_private/terraform.tfvars b/test/fixtures/simple_regional_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_regional_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_zonal/terraform.tfvars b/test/fixtures/simple_zonal/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_zonal/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_zonal_private/terraform.tfvars b/test/fixtures/simple_zonal_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_zonal_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains/terraform.tfvars b/test/fixtures/stub_domains/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains_private/terraform.tfvars b/test/fixtures/stub_domains_private/terraform.tfvars deleted file mode 120000 
index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars b/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/upstream_nameservers/terraform.tfvars b/test/fixtures/upstream_nameservers/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/upstream_nameservers/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/workload_metadata_config/terraform.tfvars b/test/fixtures/workload_metadata_config/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/workload_metadata_config/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/integration/deploy_service/controls/gcloud.rb b/test/integration/deploy_service/controls/gcloud.rb index 2f8cfb2a38..fd72b9180b 100644 --- a/test/integration/deploy_service/controls/gcloud.rb +++ b/test/integration/deploy_service/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/deploy_service/controls/kubectl.rb b/test/integration/deploy_service/controls/kubectl.rb index 1443f94057..2d4a473d2c 100644 --- a/test/integration/deploy_service/controls/kubectl.rb +++ b/test/integration/deploy_service/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/disable_client_cert/controls/gcloud.rb b/test/integration/disable_client_cert/controls/gcloud.rb index c4739ffdaa..91d0c9df87 100644 --- a/test/integration/disable_client_cert/controls/gcloud.rb +++ b/test/integration/disable_client_cert/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb index a9696c211a..6ff5fdd201 100644 --- a/test/integration/node_pool/controls/gcloud.rb +++ b/test/integration/node_pool/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/node_pool/controls/kubectl.rb b/test/integration/node_pool/controls/kubectl.rb index fb11abad17..471f9cb33f 100644 --- a/test/integration/node_pool/controls/kubectl.rb +++ b/test/integration/node_pool/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/shared_vpc/controls/gcloud.rb b/test/integration/shared_vpc/controls/gcloud.rb index 2f8cfb2a38..fd72b9180b 100644 --- a/test/integration/shared_vpc/controls/gcloud.rb +++ b/test/integration/shared_vpc/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional/controls/gcloud.rb b/test/integration/simple_regional/controls/gcloud.rb index e3fba671b3..e6bbcfc047 100644 --- a/test/integration/simple_regional/controls/gcloud.rb +++ b/test/integration/simple_regional/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional_private/controls/gcloud.rb b/test/integration/simple_regional_private/controls/gcloud.rb index f4df827813..b15dafcd02 100644 --- a/test/integration/simple_regional_private/controls/gcloud.rb +++ b/test/integration/simple_regional_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/controls/gcloud.rb b/test/integration/simple_zonal/controls/gcloud.rb index cab5f8e4fd..c2e72936b0 100644 --- a/test/integration/simple_zonal/controls/gcloud.rb +++ b/test/integration/simple_zonal/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/controls/gcp.rb b/test/integration/simple_zonal/controls/gcp.rb index 8e4cf6f96c..6e9ade64ff 100644 --- a/test/integration/simple_zonal/controls/gcp.rb +++ b/test/integration/simple_zonal/controls/gcp.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/inspec.yml b/test/integration/simple_zonal/inspec.yml index 028e773638..5cb8ff9e01 100644 --- a/test/integration/simple_zonal/inspec.yml +++ b/test/integration/simple_zonal/inspec.yml @@ -27,4 +27,5 @@ attributes: type: string - name: service_account required: true - type: string \ No newline at end of file + type: string + diff --git a/test/integration/simple_zonal_private/controls/gcloud.rb b/test/integration/simple_zonal_private/controls/gcloud.rb index 2f808e136c..9968affcb6 100644 --- a/test/integration/simple_zonal_private/controls/gcloud.rb +++ b/test/integration/simple_zonal_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains/controls/gcloud.rb b/test/integration/stub_domains/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/stub_domains/controls/gcloud.rb +++ b/test/integration/stub_domains/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains/controls/kubectl.rb b/test/integration/stub_domains/controls/kubectl.rb index 1fa048e98d..1e53883a2d 100644 --- a/test/integration/stub_domains/controls/kubectl.rb +++ b/test/integration/stub_domains/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_private/controls/gcloud.rb b/test/integration/stub_domains_private/controls/gcloud.rb index 3356196754..f16ee7b401 100644 --- a/test/integration/stub_domains_private/controls/gcloud.rb +++ b/test/integration/stub_domains_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_private/controls/kubectl.rb b/test/integration/stub_domains_private/controls/kubectl.rb index e9a1bd7412..17502685d8 100644 --- a/test/integration/stub_domains_private/controls/kubectl.rb +++ b/test/integration/stub_domains_private/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb b/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb +++ b/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb b/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb index 5223cbd2d4..8e8dfe086c 100644 --- a/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb +++ b/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/upstream_nameservers/controls/gcloud.rb b/test/integration/upstream_nameservers/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/upstream_nameservers/controls/gcloud.rb +++ b/test/integration/upstream_nameservers/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/upstream_nameservers/controls/kubectl.rb b/test/integration/upstream_nameservers/controls/kubectl.rb index 36612a02aa..21ec09c326 100644 --- a/test/integration/upstream_nameservers/controls/kubectl.rb +++ b/test/integration/upstream_nameservers/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/workload_metadata_config/controls/gcloud.rb b/test/integration/workload_metadata_config/controls/gcloud.rb index ea9c3627ce..e62606c78c 100644 --- a/test/integration/workload_metadata_config/controls/gcloud.rb +++ b/test/integration/workload_metadata_config/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/make.sh b/test/make.sh deleted file mode 100755 index ec1cd6b01d..0000000000 --- a/test/make.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). 
-# Please make sure to contribute relevant changes upstream! - -# Create a temporary directory that's auto-cleaned, even if the process aborts. -DELETE_AT_EXIT="$(mktemp -d)" -finish() { - [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" -} -trap finish EXIT -# Create a temporary file in the auto-cleaned up directory while avoiding -# overwriting TMPDIR for other processes. -# shellcheck disable=SC2120 -# (Arguments may be passed, e.g. maketemp -d) -maketemp() { - TMPDIR="${DELETE_AT_EXIT}" mktemp "$@" -} - -# find_files is a helper to exclude .git directories and match only regular -# files to avoid double-processing symlinks. -find_files() { - local pth="$1" - shift - find "${pth}" '(' \ - -path '*/.git' -o \ - -path '*/.terraform' -o \ - -path '*/.kitchen' -o \ - -path './autogen' -o \ - -path './test/fixtures/all_examples' -o \ - -path './test/fixtures/shared' ')' \ - -prune -o -type f "$@" -} - -# Compatibility with both GNU and BSD style xargs. -compat_xargs() { - local compat=() rval - # Test if xargs is GNU or BSD style. GNU xargs will succeed with status 0 - # when given --no-run-if-empty and no input on STDIN. BSD xargs will fail and - # exit status non-zero If xargs fails, assume it is BSD style and proceed. - # stderr is silently redirected to avoid console log spam. - if xargs --no-run-if-empty /dev/null; then - compat=("--no-run-if-empty") - fi - xargs "${compat[@]}" "$@" - rval="$?" 
- if [[ -z "${NOWARN:-}" ]] && [[ "${rval}" -gt 0 ]]; then - echo "Warning: compat_xargs $* failed with exit code ${rval}" >&2 - fi - return "${rval}" -} - -# This function makes sure that the required files for -# releasing to OSS are present -function basefiles() { - local fn required_files="LICENSE README.md" - echo "Checking for required files ${required_files}" - for fn in ${required_files}; do - test -f "${fn}" || echo "Missing required file ${fn}" - done -} - -# This function runs 'terraform validate' and 'terraform fmt' -# against all directory paths which contain *.tf files. -function check_terraform() { - local rval=125 - # fmt is before validate for faster feedback, validate requires terraform - # init which takes time. - echo "Running terraform fmt" - find_files . -name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | compat_xargs -t -n1 terraform fmt -diff -check=true -write=false - rval="$?" - if [[ "${rval}" -gt 0 ]]; then - echo "Error: terraform fmt failed with exit code ${rval}" >&2 - echo "Check the output for diffs and correct using terraform fmt " >&2 - return "${rval}" - fi - echo "Running terraform validate" - find_files . -not -path "./test/fixtures/shared/*" -name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | compat_xargs -t -n1 helpers/terraform_validate -} - -# This function runs 'go fmt' and 'go vet' on every file -# that ends in '.go' -function golang() { - echo "Running go fmt and go vet" - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go fmt - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go vet -} - -# This function runs the flake8 linter on every file -# ending in '.py' -function check_python() { - echo "Running flake8" - find_files . -name "*.py" -print0 | compat_xargs -0 flake8 - return 0 -} - -# This function runs the shellcheck linter on every -# file ending in '.sh' -function check_shell() { - echo "Running shellcheck" - find_files . 
-name "*.sh" -print0 | compat_xargs -0 shellcheck -x -} - -# This function makes sure that there is no trailing whitespace -# in any files in the project. -# There are some exclusions -function check_trailing_whitespace() { - local rc - echo "Checking for trailing whitespace" - find_files . -print \ - | grep -v -E '\.(pyc|png)$' \ - | NOWARN=1 compat_xargs grep -H -n '[[:blank:]]$' - rc=$? - if [[ ${rc} -eq 0 ]]; then - return 1 - fi -} - -function generate() { - pip3 install --user -r ./helpers/generate_modules/requirements.txt - ./helpers/generate_modules/generate_modules.py -} - -function generate_docs() { - echo "Generating markdown docs with terraform-docs" - local pth helper_dir rval - helper_dir="$(pwd)/helpers" - while read -r pth; do - if [[ -e "${pth}/README.md" ]]; then - (cd "${pth}" || return 3; "${helper_dir}"/terraform_docs .;) - rval="$?" - if [[ "${rval}" -gt 0 ]]; then - echo "Error: terraform_docs in ${pth} exit code: ${rval}" >&2 - return "${rval}" - fi - else - echo "Skipping ${pth} because README.md does not exist." - fi - done < <(find_files . -name '*.tf' -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u) -} - -function check_generate() { - TMPDIR=$(mktemp -d) - git worktree add --detach "$TMPDIR" >/dev/null - cd "$TMPDIR" || exit 1 - - generate >/dev/null - generate_docs >/dev/null - - git diff --stat --exit-code >/dev/null - rc=$? - cd - >/dev/null || exit 1 - - if [[ $rc -ne 0 ]]; then - echo '"make generate" creates a diff, run "make generate" and commit the results' - fi - rm -rf "$TMPDIR" - git worktree prune >/dev/null - - echo "Code was generated properly" - - exit $rc -} - -function check_generate_docs() { - TMPDIR=$(mktemp -d) - git worktree add --detach "$TMPDIR" >/dev/null - cd "$TMPDIR" || exit 1 - - generate_docs >/dev/null - git diff --stat --exit-code >/dev/null - rc=$? 
- cd - >/dev/null || exit 1 - - if [[ $rc -ne 0 ]]; then - echo '"make generate_docs" creates a diff, run "make generate_docs" and commit the results' - fi - rm -rf "$TMPDIR" - git worktree prune >/dev/null - - echo "Docs were generated properly" - - exit $rc -} - -function prepare_test_variables() { - echo "Preparing terraform.tfvars files for integration tests" - #shellcheck disable=2044 - for i in $(find ./test/fixtures -type f -name terraform.tfvars.sample); do - destination=${i/%.sample/} - if [ ! -f "${destination}" ]; then - cp "${i}" "${destination}" - echo "${destination} has been created. Please edit it to reflect your GCP configuration." - fi - done -} - -function check_headers() { - echo "Checking file headers" - # Use the exclusion behavior of find_files - find_files . -type f -print0 \ - | compat_xargs -0 python test/verify_boilerplate.py -} diff --git a/test/setup/.gitignore b/test/setup/.gitignore new file mode 100644 index 0000000000..0e515f83d2 --- /dev/null +++ b/test/setup/.gitignore @@ -0,0 +1,2 @@ +terraform.tfvars +source.sh diff --git a/test/setup/iam.tf b/test/setup/iam.tf new file mode 100644 index 0000000000..29facd32a9 --- /dev/null +++ b/test/setup/iam.tf @@ -0,0 +1,58 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + int_required_roles = [ + "roles/cloudkms.cryptoKeyEncrypterDecrypter", + "roles/compute.networkAdmin", + "roles/container.clusterAdmin", + "roles/container.developer", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountUser", + "roles/compute.networkAdmin", + "roles/compute.viewer", + "roles/resourcemanager.projectIamAdmin" + ] +} + + +resource "random_id" "random_suffix" { + byte_length = 2 +} + +resource "google_service_account" "int_test" { + project = module.gke-project.project_id + account_id = "gke-int-test-${random_id.random_suffix.hex}" + display_name = "gke-int-test" +} + +resource "google_service_account" "gke_sa" { + project = module.gke-project.project_id + account_id = "gke-sa-int-test-${random_id.random_suffix.hex}" + display_name = "gke-sa-int-test" +} + +resource "google_project_iam_member" "int_test" { + count = length(local.int_required_roles) + + project = module.gke-project.project_id + role = local.int_required_roles[count.index] + member = "serviceAccount:${google_service_account.int_test.email}" +} + +resource "google_service_account_key" "int_test" { + service_account_id = google_service_account.int_test.id +} diff --git a/test/setup/main.tf b/test/setup/main.tf new file mode 100644 index 0000000000..f974c7408e --- /dev/null +++ b/test/setup/main.tf @@ -0,0 +1,41 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module "gke-project" { + source = "terraform-google-modules/project-factory/google" + version = "~> 3.0" + + name = "ci-gke" + random_project_id = true + org_id = var.org_id + folder_id = var.folder_id + billing_account = var.billing_account + + activate_apis = [ + "bigquery-json.googleapis.com", + "cloudkms.googleapis.com", + "cloudresourcemanager.googleapis.com", + "compute.googleapis.com", + "container.googleapis.com", + "containerregistry.googleapis.com", + "iam.googleapis.com", + "iamcredentials.googleapis.com", + "oslogin.googleapis.com", + "pubsub.googleapis.com", + "serviceusage.googleapis.com", + "storage-api.googleapis.com", + ] +} diff --git a/Gemfile b/test/setup/make_source.sh old mode 100644 new mode 100755 similarity index 50% rename from Gemfile rename to test/setup/make_source.sh index a54d14ec29..b39944af41 --- a/Gemfile +++ b/test/setup/make_source.sh @@ -1,4 +1,6 @@ -# Copyright 2018 Google LLC +#!/usr/bin/env bash + +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,10 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-ruby "~> 2.5" +echo "#!/usr/bin/env bash" > ../source.sh + +project_id=$(terraform output project_id) +echo "export TF_VAR_project_id='$project_id'" >> ../source.sh + +sa_json=$(terraform output sa_key) +# shellcheck disable=SC2086 +echo "export SERVICE_ACCOUNT_JSON='$(echo $sa_json | base64 --decode)'" >> ../source.sh -source 'https://rubygems.org/' do - gem "kitchen-terraform", "~> 4.9" - gem "kubeclient", "~> 4.0" - gem "rest-client", "~> 2.0" -end +compute_engine_service_account=$(terraform output compute_engine_service_account) +echo "export TF_VAR_compute_engine_service_account='$compute_engine_service_account'" >> ../source.sh diff --git a/test/setup/outputs.tf b/test/setup/outputs.tf new file mode 100644 index 0000000000..3e508ed1c7 --- /dev/null +++ b/test/setup/outputs.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +output "project_id" { + value = module.gke-project.project_id +} + +output "sa_key" { + value = google_service_account_key.int_test.private_key + sensitive = true +} + +output "compute_engine_service_account" { + value = google_service_account.gke_sa.email +} diff --git a/test/setup/variables.tf b/test/setup/variables.tf new file mode 100644 index 0000000000..6d80b89896 --- /dev/null +++ b/test/setup/variables.tf @@ -0,0 +1,26 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +variable "org_id" { + description = "The numeric organization id" +} + +variable "folder_id" { + description = "The folder to deploy in" +} + +variable "billing_account" { + description = "The billing account id associated with the project, e.g. XXXXXX-YYYYYY-ZZZZZZ" +} diff --git a/test/boilerplate/boilerplate.tf.txt b/test/setup/versions.tf similarity index 76% rename from test/boilerplate/boilerplate.tf.txt rename to test/setup/versions.tf index cfccff84ca..efbd8ea517 100644 --- a/test/boilerplate/boilerplate.tf.txt +++ b/test/setup/versions.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,3 +13,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + +terraform { + required_version = ">= 0.12" +} + +provider "google" { + version = "~> 2.13.0" +} + +provider "google-beta" { + version = "~> 2.13.0" +} diff --git a/test/task_helper_functions.sh b/test/task_helper_functions.sh new file mode 100755 index 0000000000..70ab3db5c8 --- /dev/null +++ b/test/task_helper_functions.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function generate() { + pip3 install --user -r /workspace/helpers/generate_modules/requirements.txt + /workspace/helpers/generate_modules/generate_modules.py +} + +# Changed from using git-diff, to aviod errors on CI: +# fatal: not a git repository (or any parent up to mount point /) +function check_generate() { + local tempdir rval rc + setup_trap_handler + tempdir=$(mktemp -d) + rval=0 + echo "Checking submodule's files generation" + rsync -axh \ + --exclude '*/.terraform' \ + --exclude '*/.kitchen' \ + --exclude '*/.git' \ + /workspace "${tempdir}" >/dev/null 2>/dev/null + cd "${tempdir}" || exit 1 + generate >/dev/null 2>/dev/null + diff -r \ + --exclude=".terraform" \ + --exclude=".kitchen" \ + --exclude=".git" \ + /workspace "${tempdir}/workspace" + rc=$? + if [[ "${rc}" -ne 0 ]]; then + echo "Error: submodule's files generation has not been run, please run the" + echo "'source /workspace/helpers/generate.sh && generate' commands and commit the above changes." 
+ ((rval++)) + fi + cd /workspace || exit 1 + rm -Rf "${tempdir}" + return $((rval)) +} + +find_files() { + local pth="$1" + shift + find "${pth}" '(' \ + -path '*/.git' -o \ + -path '*/.terraform' -o \ + -path '*/.kitchen' -o \ + -path './autogen' -o \ + -path './test/fixtures/all_examples' -o \ + -path './test/fixtures/shared' ')' \ + -prune -o -type f "$@" +} diff --git a/test/test_verify_boilerplate.py b/test/test_verify_boilerplate.py deleted file mode 100755 index 22a3cca055..0000000000 --- a/test/test_verify_boilerplate.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' A simple test for the verify_boilerplate python script. -This will create a set of test files, both valid and invalid, -and confirm that the has_valid_header call returns the correct -value. - -It also checks the number of files that are found by the -get_files call. -''' -from copy import deepcopy -from tempfile import mkdtemp -from shutil import rmtree -import unittest -from verify_boilerplate import has_valid_header, get_refs, get_regexs, \ - get_args, get_files - - -class AllTestCase(unittest.TestCase): - """ - All of the setup, teardown, and tests are contained in this - class. - """ - - def write_file(self, filename, content, expected): - """ - A utility method that creates test files, and adds them to - the cases that will be tested. 
- - Args: - filename: (string) the file name (path) to be created. - content: (list of strings) the contents of the file. - expected: (boolean) True if the header is expected to be valid, - false if not. - """ - - file = open(filename, 'w+') - for line in content: - file.write(line + "\n") - file.close() - self.cases[filename] = expected - - def create_test_files(self, tmp_path, extension, header): - """ - Creates 2 test files for .tf, .xml, .go, etc and one for - Dockerfile, and Makefile. - - The reason for the difference is that Makefile and Dockerfile - don't have an extension. These would be substantially more - difficult to create negative test cases, unless the files - were written, deleted, and re-written. - - Args: - tmp_path: (string) the path in which to create the files - extension: (string) the file extension - header: (list of strings) the header/boilerplate content - """ - - content = "\n...blah \ncould be code or could be garbage\n" - special_cases = ["Dockerfile", "Makefile"] - header_template = deepcopy(header) - valid_filename = tmp_path + extension - valid_content = header_template.append(content) - if extension not in special_cases: - # Invalid test cases for non-*file files (.tf|.py|.sh|.yaml|.xml..) - invalid_header = [] - for line in header_template: - if "2018" in line: - invalid_header.append(line.replace('2018', 'YEAR')) - else: - invalid_header.append(line) - invalid_header.append(content) - invalid_content = invalid_header - invalid_filename = tmp_path + "invalid." + extension - self.write_file(invalid_filename, invalid_content, False) - valid_filename = tmp_path + "testfile." + extension - - valid_content = header_template - self.write_file(valid_filename, valid_content, True) - - def setUp(self): - """ - Set initial counts and values, and initializes the setup of the - test files. 
- """ - self.cases = {} - self.tmp_path = mkdtemp() + "/" - self.my_args = get_args() - self.my_refs = get_refs(self.my_args) - self.my_regex = get_regexs() - self.prexisting_file_count = len( - get_files(self.my_refs.keys(), self.my_args)) - for key in self.my_refs: - self.create_test_files(self.tmp_path, key, - self.my_refs.get(key)) - - def tearDown(self): - """ Delete the test directory. """ - rmtree(self.tmp_path) - - def test_files_headers(self): - """ - Confirms that the expected output of has_valid_header is correct. - """ - for case in self.cases: - if self.cases[case]: - self.assertTrue(has_valid_header(case, self.my_refs, - self.my_regex)) - else: - self.assertFalse(has_valid_header(case, self.my_refs, - self.my_regex)) - - def test_invalid_count(self): - """ - Test that the initial files found isn't zero, indicating - a problem with the code. - """ - self.assertFalse(self.prexisting_file_count == 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/verify_boilerplate.py b/test/verify_boilerplate.py deleted file mode 100644 index a632fdedcc..0000000000 --- a/test/verify_boilerplate.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Verifies that all source files contain the necessary copyright boilerplate -# snippet. 
-# This is based on existing work -# https://github.com/kubernetes/test-infra/blob/master/hack -# /verify_boilerplate.py -from __future__ import print_function -import argparse -import glob -import os -import re -import sys - - -def get_args(): - """Parses command line arguments. - - Configures and runs argparse.ArgumentParser to extract command line - arguments. - - Returns: - An argparse.Namespace containing the arguments parsed from the - command line - """ - parser = argparse.ArgumentParser() - parser.add_argument("filenames", - help="list of files to check, " - "all files if unspecified", - nargs='*') - rootdir = os.path.dirname(__file__) + "/../" - rootdir = os.path.abspath(rootdir) - parser.add_argument( - "--rootdir", - default=rootdir, - help="root directory to examine") - - default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate") - parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir) - return parser.parse_args() - - -def get_refs(ARGS): - """Converts the directory of boilerplate files into a map keyed by file - extension. - - Reads each boilerplate file's contents into an array, then adds that array - to a map keyed by the file extension. - - Returns: - A map of boilerplate lines, keyed by file extension. For example, - boilerplate.py.txt would result in the k,v pair {".py": py_lines} where - py_lines is an array containing each line of the file. - """ - refs = {} - - # Find and iterate over the absolute path for each boilerplate template - for path in glob.glob(os.path.join( - ARGS.boilerplate_dir, - "boilerplate.*.txt")): - extension = os.path.basename(path).split(".")[1] - ref_file = open(path, 'r') - ref = ref_file.read().splitlines() - ref_file.close() - refs[extension] = ref - return refs - - -# pylint: disable=too-many-locals -def has_valid_header(filename, refs, regexs): - """Test whether a file has the correct boilerplate header. 
- - Tests each file against the boilerplate stored in refs for that file type - (based on extension), or by the entire filename (eg Dockerfile, Makefile). - Some heuristics are applied to remove build tags and shebangs, but little - variance in header formatting is tolerated. - - Args: - filename: A string containing the name of the file to test - refs: A map of boilerplate headers, keyed by file extension - regexs: a map of compiled regex objects used in verifying boilerplate - - Returns: - True if the file has the correct boilerplate header, otherwise returns - False. - """ - try: - with open(filename, 'r') as fp: # pylint: disable=invalid-name - data = fp.read() - except IOError: - return False - basename = os.path.basename(filename) - extension = get_file_extension(filename) - if extension: - ref = refs[extension] - else: - ref = refs[basename] - # remove build tags from the top of Go files - if extension == "go": - con = regexs["go_build_constraints"] - (data, found) = con.subn("", data, 1) - # remove shebang - elif extension == "sh" or extension == "py": - she = regexs["shebang"] - (data, found) = she.subn("", data, 1) - data = data.splitlines() - # if our test file is smaller than the reference it surely fails! - if len(ref) > len(data): - return False - # trim our file to the same number of lines as the reference file - data = data[:len(ref)] - year = regexs["year"] - for datum in data: - if year.search(datum): - return False - - # if we don't match the reference at this point, fail - if ref != data: - return False - return True - - -def get_file_extension(filename): - """Extracts the extension part of a filename. - - Identifies the extension as everything after the last period in filename. 
- - Args: - filename: string containing the filename - - Returns: - A string containing the extension in lowercase - """ - return os.path.splitext(filename)[1].split(".")[-1].lower() - - -# These directories will be omitted from header checks -SKIPPED_DIRS = [ - 'Godeps', 'third_party', '_gopath', '_output', - '.git', 'vendor', '__init__.py', 'node_modules' -] - - -def normalize_files(files): - """Extracts the files that require boilerplate checking from the files - argument. - - A new list will be built. Each path from the original files argument will - be added unless it is within one of SKIPPED_DIRS. All relative paths will - be converted to absolute paths by prepending the root_dir path parsed from - the command line, or its default value. - - Args: - files: a list of file path strings - - Returns: - A modified copy of the files list where any any path in a skipped - directory is removed, and all paths have been made absolute. - """ - newfiles = [] - for pathname in files: - if any(x in pathname for x in SKIPPED_DIRS): - continue - newfiles.append(pathname) - for idx, pathname in enumerate(newfiles): - if not os.path.isabs(pathname): - newfiles[idx] = os.path.join(ARGS.rootdir, pathname) - return newfiles - - -def get_files(extensions, ARGS): - """Generates a list of paths whose boilerplate should be verified. - - If a list of file names has been provided on the command line, it will be - treated as the initial set to search. Otherwise, all paths within rootdir - will be discovered and used as the initial set. - - Once the initial set of files is identified, it is normalized via - normalize_files() and further stripped of any file name whose extension is - not in extensions. 
- - Args: - extensions: a list of file extensions indicating which file types - should have their boilerplate verified - - Returns: - A list of absolute file paths - """ - files = [] - if ARGS.filenames: - files = ARGS.filenames - else: - for root, dirs, walkfiles in os.walk(ARGS.rootdir): - # don't visit certain dirs. This is just a performance improvement - # as we would prune these later in normalize_files(). But doing it - # cuts down the amount of filesystem walking we do and cuts down - # the size of the file list - for dpath in SKIPPED_DIRS: - if dpath in dirs: - dirs.remove(dpath) - for name in walkfiles: - pathname = os.path.join(root, name) - files.append(pathname) - files = normalize_files(files) - outfiles = [] - for pathname in files: - basename = os.path.basename(pathname) - extension = get_file_extension(pathname) - if extension in extensions or basename in extensions: - outfiles.append(pathname) - return outfiles - - -def get_regexs(): - """Builds a map of regular expressions used in boilerplate validation. - - There are two scenarios where these regexes are used. The first is in - validating the date referenced is the boilerplate, by ensuring it is an - acceptable year. The second is in identifying non-boilerplate elements, - like shebangs and compiler hints that should be ignored when validating - headers. - - Returns: - A map of compiled regular expression objects, keyed by mnemonic. 
- """ - regexs = {} - # Search for "YEAR" which exists in the boilerplate, but shouldn't in the - # real thing - regexs["year"] = re.compile('YEAR') - # dates can be 2014, 2015, 2016 or 2017, company holder names can be - # anything - regexs["date"] = re.compile('(2014|2015|2016|2017|2018)') - # strip // +build \n\n build constraints - regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", - re.MULTILINE) - # strip #!.* from shell/python scripts - regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) - return regexs - - -def main(args): - """Identifies and verifies files that should have the desired boilerplate. - - Retrieves the lists of files to be validated and tests each one in turn. - If all files contain correct boilerplate, this function terminates - normally. Otherwise it prints the name of each non-conforming file and - exists with a non-zero status code. - """ - regexs = get_regexs() - refs = get_refs(args) - filenames = get_files(refs.keys(), args) - nonconforming_files = [] - for filename in filenames: - if not has_valid_header(filename, refs, regexs): - nonconforming_files.append(filename) - if nonconforming_files: - print('%d files have incorrect boilerplate headers:' % len( - nonconforming_files)) - for filename in sorted(nonconforming_files): - print(os.path.relpath(filename, args.rootdir)) - sys.exit(1) - - -if __name__ == "__main__": - ARGS = get_args() - main(ARGS) From 55ade204c6eb74c522273f920939190a820cd831 Mon Sep 17 00:00:00 2001 From: Mirko Montanari Date: Mon, 30 Sep 2019 17:56:10 -0700 Subject: [PATCH 28/82] Add a parameter 'registry_project_id' The PR allows configuring the project holding the GCR registry when used in connection with 'create_service_account'=true and grant_registry_access=true. 
Holding the GCR is a project with other resources increases the risk of exposing sensitive data to the service account running the nodes, as the required permissions of role roles/storage.objectViewer provide access to all storage objects in the project. --- README.md | 20 ++++++++++ autogen/README.md | 3 ++ autogen/sa.tf | 2 +- autogen/variables.tf | 6 +++ examples/workload_metadata_config/main.tf | 5 ++- .../workload_metadata_config/variables.tf | 5 +-- modules/beta-private-cluster/README.md | 4 ++ modules/beta-private-cluster/sa.tf | 2 +- modules/beta-private-cluster/variables.tf | 6 +++ modules/beta-public-cluster/README.md | 4 ++ modules/beta-public-cluster/sa.tf | 2 +- modules/beta-public-cluster/variables.tf | 6 +++ modules/private-cluster/README.md | 4 ++ modules/private-cluster/sa.tf | 2 +- modules/private-cluster/variables.tf | 6 +++ sa.tf | 2 +- test/fixtures/shared/outputs.tf | 3 ++ test/fixtures/shared/variables.tf | 4 ++ .../workload_metadata_config/example.tf | 37 ++++++++++++++++++- .../controls/gcloud.rb | 18 +++++++++ .../workload_metadata_config/inspec.yml | 6 +++ test/setup/make_source.sh | 3 ++ variables.tf | 6 +++ 23 files changed, 145 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 50e2afd63b..02f020d54a 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,22 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. 
This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + ## Inputs @@ -151,6 +167,7 @@ Then perform the following commands on the root folder: | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | @@ -212,6 +229,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/autogen/README.md b/autogen/README.md index 421e4a2605..dc0b63b003 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -269,6 +269,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/autogen/sa.tf b/autogen/sa.tf index 62b31f457a..eaebeb2a22 100644 --- a/autogen/sa.tf +++ b/autogen/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? 
var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/autogen/variables.tf b/autogen/variables.tf index 0fedacb2af..17566d238f 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -269,6 +269,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/examples/workload_metadata_config/main.tf b/examples/workload_metadata_config/main.tf index 11cae808d4..f9fb25da5b 100644 --- a/examples/workload_metadata_config/main.tf +++ b/examples/workload_metadata_config/main.tf @@ -40,8 +40,9 @@ module "gke" { subnetwork = var.subnetwork ip_range_pods = var.ip_range_pods ip_range_services = var.ip_range_services - create_service_account = false - service_account = var.compute_engine_service_account + create_service_account = true + grant_registry_access = true + registry_project_id = var.registry_project_id enable_private_endpoint = true enable_private_nodes = true master_ipv4_cidr_block = "172.16.0.0/28" diff --git a/examples/workload_metadata_config/variables.tf b/examples/workload_metadata_config/variables.tf index 040c78d2c4..eaa8c36e83 100644 --- a/examples/workload_metadata_config/variables.tf +++ b/examples/workload_metadata_config/variables.tf @@ -48,7 +48,6 @@ variable "ip_range_services" { description = "The secondary ip range to use for pods" } -variable "compute_engine_service_account" { - description = "Service account to 
associate to the nodes in the cluster" +variable "registry_project_id" { + description = "Project name for the GCR registry" } - diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 988d48ead8..c2920e4b28 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -190,6 +190,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). 
| bool | `"false"` | no | @@ -258,6 +259,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/beta-private-cluster/sa.tf b/modules/beta-private-cluster/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-private-cluster/sa.tf +++ b/modules/beta-private-cluster/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 9a869a830f..2c53d06b90 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -267,6 +267,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. 
The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 7d59e927bf..f013439240 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -181,6 +181,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). 
| bool | `"false"` | no | @@ -249,6 +250,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/beta-public-cluster/sa.tf b/modules/beta-public-cluster/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-public-cluster/sa.tf +++ b/modules/beta-public-cluster/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 0ae2b75661..07771a27a9 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -267,6 +267,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. 
The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index d823f640fa..f7f8fef179 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -176,6 +176,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | @@ -237,6 +238,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/private-cluster/sa.tf b/modules/private-cluster/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/private-cluster/sa.tf +++ b/modules/private-cluster/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 8008e08975..00d7779e83 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -257,6 +257,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." 
+ default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/sa.tf b/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/sa.tf +++ b/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/test/fixtures/shared/outputs.tf b/test/fixtures/shared/outputs.tf index 1c2eb5e9ba..71b4b250de 100644 --- a/test/fixtures/shared/outputs.tf +++ b/test/fixtures/shared/outputs.tf @@ -79,3 +79,6 @@ output "service_account" { value = module.example.service_account } +output "registry_project_id" { + value = var.registry_project_id +} diff --git a/test/fixtures/shared/variables.tf b/test/fixtures/shared/variables.tf index 76280e0065..5dff24dbd4 100644 --- a/test/fixtures/shared/variables.tf +++ b/test/fixtures/shared/variables.tf @@ -33,3 +33,7 @@ variable "compute_engine_service_account" { description = "The email address of the service account to associate with the GKE cluster" } +variable "registry_project_id" { + description = "Project to use for granting access to the GCR registry, if requested" +} + diff --git a/test/fixtures/workload_metadata_config/example.tf b/test/fixtures/workload_metadata_config/example.tf index 4d4a98e119..b0db3f8b8d 100644 --- a/test/fixtures/workload_metadata_config/example.tf +++ b/test/fixtures/workload_metadata_config/example.tf @@ -17,6 +17,28 @@ module "example" { source = 
"../../../examples/workload_metadata_config" +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 5258f89... Removing a few conflicting files. +======= +>>>>>>> 244108e... Removing a few conflicting files. +======= +>>>>>>> 5258f89... Removing a few conflicting files. + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + zones = slice(var.zones, 0, 1) + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +======= project_id = var.project_id cluster_name_suffix = "-${random_string.suffix.result}" region = var.region @@ -25,5 +47,18 @@ module "example" { subnetwork = google_compute_subnetwork.main.name ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name - compute_engine_service_account = var.compute_engine_service_account +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> d791335... Removed the custom test for create_service_account +======= +>>>>>>> 5258f89... Removing a few conflicting files. +======= +>>>>>>> e7f04bb... Removed the custom test for create_service_account +======= +>>>>>>> 244108e... Removing a few conflicting files. +======= +>>>>>>> d791335... Removed the custom test for create_service_account +======= +>>>>>>> 5258f89... Removing a few conflicting files. 
+ registry_project_id = var.registry_project_id } diff --git a/test/integration/workload_metadata_config/controls/gcloud.rb b/test/integration/workload_metadata_config/controls/gcloud.rb index e62606c78c..ad642ff7c9 100644 --- a/test/integration/workload_metadata_config/controls/gcloud.rb +++ b/test/integration/workload_metadata_config/controls/gcloud.rb @@ -13,8 +13,10 @@ # limitations under the License. project_id = attribute('project_id') +registry_project_id = attribute('registry_project_id') location = attribute('location') cluster_name = attribute('cluster_name') +service_account = attribute('service_account') control "gcloud" do title "Google Compute Engine GKE configuration" @@ -55,4 +57,20 @@ end end end + + describe command("gcloud projects get-iam-policy #{registry_project_id} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:iam) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + it "has expected registry roles" do + expect(iam['bindings']).to include("members" => ["serviceAccount:#{service_account}"], "role" => "roles/storage.objectViewer") + end + end end diff --git a/test/integration/workload_metadata_config/inspec.yml b/test/integration/workload_metadata_config/inspec.yml index f6f3811afa..4f2b7d40d6 100644 --- a/test/integration/workload_metadata_config/inspec.yml +++ b/test/integration/workload_metadata_config/inspec.yml @@ -9,3 +9,9 @@ attributes: - name: project_id required: true type: string + - name: service_account + required: true + type: string + - name: registry_project_id + required: false + type: string diff --git a/test/setup/make_source.sh b/test/setup/make_source.sh index b39944af41..ad3f57165a 100755 --- a/test/setup/make_source.sh +++ b/test/setup/make_source.sh @@ -19,6 +19,9 @@ echo "#!/usr/bin/env bash" > ../source.sh project_id=$(terraform output project_id) echo "export TF_VAR_project_id='$project_id'" >> ../source.sh +# We use the same project 
for registry project in the tests. +echo "export TF_VAR_registry_project_id='$project_id'" >> ../source.sh + sa_json=$(terraform output sa_key) # shellcheck disable=SC2086 echo "export SERVICE_ACCOUNT_JSON='$(echo $sa_json | base64 --decode)'" >> ../source.sh diff --git a/variables.tf b/variables.tf index 460bdeaeff..da9c744646 100644 --- a/variables.tf +++ b/variables.tf @@ -257,6 +257,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." From bf3946905bcb9179c07f99bb432046ce66962c61 Mon Sep 17 00:00:00 2001 From: Mirko Montanari Date: Fri, 4 Oct 2019 15:06:47 -0700 Subject: [PATCH 29/82] Reformatted a file --- .../workload_metadata_config/example.tf | 35 ------------------- 1 file changed, 35 deletions(-) diff --git a/test/fixtures/workload_metadata_config/example.tf b/test/fixtures/workload_metadata_config/example.tf index b0db3f8b8d..3568cfa404 100644 --- a/test/fixtures/workload_metadata_config/example.tf +++ b/test/fixtures/workload_metadata_config/example.tf @@ -17,16 +17,6 @@ module "example" { source = "../../../examples/workload_metadata_config" -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 5258f89... Removing a few conflicting files. -======= ->>>>>>> 244108e... Removing a few conflicting files. -======= ->>>>>>> 5258f89... Removing a few conflicting files. 
project_id = var.project_id cluster_name_suffix = "-${random_string.suffix.result}" region = var.region @@ -35,30 +25,5 @@ module "example" { subnetwork = google_compute_subnetwork.main.name ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= - project_id = var.project_id - cluster_name_suffix = "-${random_string.suffix.result}" - region = var.region - zones = slice(var.zones, 0, 1) - network = google_compute_network.main.name - subnetwork = google_compute_subnetwork.main.name - ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name - ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> d791335... Removed the custom test for create_service_account -======= ->>>>>>> 5258f89... Removing a few conflicting files. -======= ->>>>>>> e7f04bb... Removed the custom test for create_service_account -======= ->>>>>>> 244108e... Removing a few conflicting files. -======= ->>>>>>> d791335... Removed the custom test for create_service_account -======= ->>>>>>> 5258f89... Removing a few conflicting files. 
registry_project_id = var.registry_project_id } From f13d9b791ec906004bb96d317b0f6e277d01765c Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Thu, 10 Oct 2019 15:55:46 -0700 Subject: [PATCH 30/82] apply suggestions from flake8 to helpers/migrate.py --- helpers/migrate.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/helpers/migrate.py b/helpers/migrate.py index a22a7ce751..8f2d71cfce 100755 --- a/helpers/migrate.py +++ b/helpers/migrate.py @@ -18,7 +18,6 @@ import copy import subprocess import sys -import shutil import re MIGRATIONS = [ @@ -43,6 +42,7 @@ }, ] + class ModuleMigration: """ Migrate the resources from a flat project factory to match the new @@ -89,6 +89,7 @@ def targets(self): return to_move + class TerraformModule: """ A Terraform module with associated resources. @@ -171,7 +172,7 @@ def __init__(self, module, resource_type, name): self.module = module self.resource_type = resource_type - find_suffix = re.match('(^.+)\[(\d+)\]', name) + find_suffix = re.match(r'(^.+)\[(\d+)\]', name) if find_suffix: self.name = find_suffix.group(1) self.index = find_suffix.group(2) @@ -187,7 +188,7 @@ def path(self): if parts[0] == '': del parts[0] path = ".".join(parts) - if self.index is not -1 and self.plural: + if self.index != -1 and self.plural: path = "{0}[{1}]".format(path, self.index) return path @@ -198,6 +199,7 @@ def __repr__(self): self.resource_type, self.name) + def group_by_module(resources): """ Group a set of resources according to their containing module. 
@@ -241,7 +243,11 @@ def state_changes_for_module(module, statefile=None): for (old, new) in migration.moves(): wrapper = '"{0}"' - argv = ["terraform", "state", "mv", wrapper.format(old), wrapper.format(new)] + argv = ["terraform", + "state", + "mv", + wrapper.format(old), + wrapper.format(new)] commands.append(argv) return commands @@ -265,8 +271,8 @@ def migrate(statefile=None, dryrun=False): # Filter our list of Terraform modules down to anything that looks like a # zonal GKE module. We key this off the presence off of - # `google_container_cluster.zonal_primary` since that should almost always be - # unique to a GKE module. + # `google_container_cluster.zonal_primary` since that should almost always + # be unique to a GKE module. modules_to_migrate = [ module for module in modules if module.has_resource("google_container_cluster", "zonal_primary") @@ -289,6 +295,7 @@ def migrate(statefile=None, dryrun=False): argv = [arg.strip('"') for arg in argv] subprocess.run(argv, check=True, encoding='utf-8') + def main(argv): parser = argparser() args = parser.parse_args(argv[1:]) @@ -298,6 +305,7 @@ def main(argv): migrate(dryrun=args.dryrun) + def argparser(): parser = argparse.ArgumentParser(description='Migrate Terraform state') parser.add_argument('--dryrun', action='store_true', @@ -307,4 +315,4 @@ def argparser(): if __name__ == "__main__": - main(sys.argv) \ No newline at end of file + main(sys.argv) From e80b9c22c45aaca67864c259b9f4e515bf1409a3 Mon Sep 17 00:00:00 2001 From: Aaron Sproul Date: Fri, 11 Oct 2019 10:34:44 -0700 Subject: [PATCH 31/82] Staying up to date to ensure lint tests pass locally --- README.md | 16 +++ .../README.md | 135 ------------------ .../private-cluster-update-variant/README.md | 135 ------------------ .../node_pool_update_variant/terraform.tfvars | 1 - 4 files changed, 16 insertions(+), 271 deletions(-) delete mode 120000 test/fixtures/node_pool_update_variant/terraform.tfvars diff --git a/README.md b/README.md index 
50e2afd63b..f879d0f7dc 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,22 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + ## Inputs diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 020ab728c0..0fc0068e96 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -278,141 +278,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. 
[PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. 
Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. 
-For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 7becc84ea4..e817361124 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -257,141 +257,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). 
- -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. 
- -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. 
Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/test/fixtures/node_pool_update_variant/terraform.tfvars b/test/fixtures/node_pool_update_variant/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/node_pool_update_variant/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file From aa048e1a54ee609ff076717e57944aa1fdb8dbd9 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Fri, 11 Oct 2019 15:16:14 -0400 Subject: [PATCH 32/82] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9200790618..24b37b3ebe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,10 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +### Added + +* Added [private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) and [beta 
private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) variants which allow node pools to be created before being destroyed. [#256] + ## [v5.0.0] - 2019-09-25 v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). @@ -196,6 +200,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#256]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/256 [#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 [#228]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/228 [#238]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/238 From c4bf4cf134c617758e9461377c2482d62c1e6254 Mon Sep 17 00:00:00 2001 From: pp Date: Tue, 27 Aug 2019 13:35:32 +0300 Subject: [PATCH 33/82] Changed `region` variable * `region` variable became optional for zonal clusters (Fixes #235) --- CHANGELOG.md | 2 ++ README.md | 2 +- autogen/main.tf | 3 ++- autogen/networks.tf | 2 +- autogen/variables.tf | 3 ++- main.tf | 3 ++- modules/beta-private-cluster/README.md | 2 +- modules/beta-private-cluster/main.tf | 3 ++- modules/beta-private-cluster/networks.tf | 2 +- modules/beta-private-cluster/variables.tf | 3 ++- modules/beta-public-cluster/README.md | 2 +- modules/beta-public-cluster/main.tf | 3 ++- modules/beta-public-cluster/networks.tf | 2 +- modules/beta-public-cluster/variables.tf | 3 ++- modules/private-cluster/README.md | 2 +- modules/private-cluster/main.tf | 3 ++- modules/private-cluster/networks.tf | 2 +- modules/private-cluster/variables.tf | 3 ++- networks.tf | 2 
+- variables.tf | 3 ++- 20 files changed, 31 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 706682fbf1..ec280dddc3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] ### Added +* Made `region` variable optional for zonal clusters [#247] * Added `grant_registry_access` variable to grant Container Registry access to created SA [#236] * Support for Intranode Visbiility (IV) and Veritical Pod Autoscaling (VPA) beta features [#216] * Support for Workload Identity beta feature [#234] @@ -170,6 +171,7 @@ Extending the adopted spec, each change should have a link to its corresponding [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 [#236]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/236 [#217]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/217 [#234]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/234 diff --git a/README.md b/README.md index f38023e600..4f92ede67e 100644 --- a/README.md +++ b/README.md @@ -166,7 +166,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | diff --git a/autogen/main.tf b/autogen/main.tf index 9d6476b916..e020abdc25 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -27,7 +27,7 @@ data "google_compute_zones" "available" { {% endif %} project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -38,6 +38,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/autogen/networks.tf b/autogen/networks.tf index 19a9af5307..84baaa8995 100644 --- a/autogen/networks.tf +++ b/autogen/networks.tf @@ -35,6 +35,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { {% endif %} name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/autogen/variables.tf b/autogen/variables.tf index 9a956194e0..46f34d3074 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/main.tf b/main.tf index b63d60f884..a9e1c15810 100644 --- a/main.tf +++ b/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index eba9f48d31..4fe5c70dc8 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -188,7 +188,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 43fea3b6dc..8c88b8936d 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-private-cluster/networks.tf b/modules/beta-private-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-private-cluster/networks.tf +++ b/modules/beta-private-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 6aa50eafff..6d1fa393ce 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/modules/beta-public-cluster/README.md 
b/modules/beta-public-cluster/README.md index 49ffddedc1..75a7e55188 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -179,7 +179,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index db5138e99f..4b1edff890 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? 
join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-public-cluster/networks.tf b/modules/beta-public-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-public-cluster/networks.tf +++ b/modules/beta-public-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index d8b68de69b..7873500f6e 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index d3044b770d..48ffa5b508 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -175,7 +175,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. 
| string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 9c8dc629eb..85744dc006 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/private-cluster/networks.tf b/modules/private-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/private-cluster/networks.tf +++ b/modules/private-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 407787f143..65edafd095 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/networks.tf b/networks.tf index a382073dc0..aae034eee5 100644 --- a/networks.tf +++ b/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/variables.tf b/variables.tf index d8c339b50e..f74396e29e 100644 --- a/variables.tf +++ b/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { From 9249a528c60731a627ea0682914665c236ed0a9a Mon Sep 17 00:00:00 2001 From: Kostas Evangelou Date: Wed, 16 Oct 2019 15:54:57 +0100 Subject: [PATCH 34/82] Make default metadata, labels and tags optional in beta private cluster --- 
modules/beta-private-cluster/cluster.tf | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index c481c69a35..08332c4db5 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -243,22 +243,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -277,8 +269,8 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) From dcb2b11ff4206110f54d79199c6fbf125edd0966 Mon Sep 17 00:00:00 2001 From: Kostas Evangelou Date: Wed, 16 Oct 2019 17:02:44 +0100 Subject: [PATCH 35/82] generate changes for all (sub)modules --- autogen/cluster.tf | 20 +++++------------ cluster.tf | 20 +++++------------ .../cluster.tf | 22 ++++++------------- modules/beta-public-cluster/cluster.tf | 20 +++++------------ .../private-cluster-update-variant/cluster.tf | 22 ++++++------------- modules/private-cluster/cluster.tf | 20 +++++------------ 6 files changed, 38 insertions(+), 86 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 296b2818df..5de8ad6876 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -341,22 +341,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -377,8 +369,8 @@ resource "google_container_node_pool" "pools" { } {% endif %} tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) diff --git a/cluster.tf b/cluster.tf index ffdb27b0fc..ba117abd3d 100644 --- a/cluster.tf +++ b/cluster.tf @@ -161,22 +161,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -184,8 +176,8 @@ resource "google_container_node_pool" "pools" { }, ) tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index cf1def945d..6b02e72750 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -315,22 +315,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -349,8 +341,8 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -389,7 +381,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = true } diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index a264e932b9..0ff13c98b2 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -238,22 +238,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? 
{ "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -272,8 +264,8 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index e8db91a77a..19929ed7de 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -238,22 +238,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? 
{ "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -261,8 +253,8 @@ resource "google_container_node_pool" "pools" { }, ) tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -293,7 +285,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = true } diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 412e8295ed..0a9df965ef 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -166,22 +166,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -189,8 +181,8 @@ resource "google_container_node_pool" "pools" { }, ) tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) From 970671f7d42705147a07e78375dced78332bdaab Mon Sep 17 00:00:00 2001 From: Kostas Evangelou Date: Wed, 16 Oct 2019 17:33:00 +0100 Subject: [PATCH 36/82] format tf files --- modules/beta-private-cluster-update-variant/cluster.tf | 2 +- modules/private-cluster-update-variant/cluster.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 6b02e72750..5b3b769c9e 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -381,7 +381,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = true } diff --git 
a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index 19929ed7de..c6d76de2e5 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -285,7 +285,7 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] create_before_destroy = true } From bb420dfe64e4998b225fdad9b797e727ea259db4 Mon Sep 17 00:00:00 2001 From: omazin Date: Thu, 17 Oct 2019 15:15:38 +0300 Subject: [PATCH 37/82] [wait-for-cluster.sh] Set CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE. --- scripts/wait-for-cluster.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" From 50521cfdd65985274fda85a6188b8d89a5754cea Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Thu, 17 Oct 2019 21:12:33 -0500 Subject: [PATCH 38/82] added simple example with networking --- .../simple_regional_with_networking/README.md | 47 +++++++++++++ .../simple_regional_with_networking/main.tf | 68 +++++++++++++++++++ .../outputs.tf | 48 +++++++++++++ .../variables.tf | 49 +++++++++++++ 4 files changed, 212 insertions(+) create mode 100644 examples/simple_regional_with_networking/README.md create mode 100644 examples/simple_regional_with_networking/main.tf create mode 100644 examples/simple_regional_with_networking/outputs.tf create mode 100644 examples/simple_regional_with_networking/variables.tf diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md new file mode 100644 index 0000000000..a58378f9ae --- /dev/null +++ b/examples/simple_regional_with_networking/README.md @@ -0,0 +1,47 @@ +# Simple Regional Cluster with Networking + +This example illustrates how to create a VPC and a simple cluster. 
+ + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | +| network\_name | The VPC network created to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork created to host the cluster in | string | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| network\_name | The name of the VPC being created | +| project\_id | | +| region | | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| +| subnet\_names | The name of the subnet being created | +| subnet\_secondary\_ranges | The secondary ranges associated with the subnet | +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf new file mode 100644 index 0000000000..f780395645 --- /dev/null +++ b/examples/simple_regional_with_networking/main.tf @@ -0,0 +1,68 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + cluster_type = "simple-regional" +} + +provider "google" { + version = "~> 2.12.0" + region = var.region +} + +module "gcp-network" { + source = "terraform-google-modules/network/google" + project_id = var.project_id + network_name = var.network_name + + subnets = [ + { + subnet_name = var.subnetwork + subnet_ip = "10.0.0.0/17" + subnet_region = var.region + }, + ] + + secondary_ranges = { + "${var.subnetwork}" = [ + { + range_name = var.ip_range_pods + ip_cidr_range = "192.168.0.0/18" + }, + { + range_name = var.ip_range_services + ip_cidr_range = "192.168.64.0/18" + }, + ] + } +} + +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = true + region = var.region + network = var.network_name + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account +} + +data "google_client_config" "default" { +} diff --git a/examples/simple_regional_with_networking/outputs.tf b/examples/simple_regional_with_networking/outputs.tf new file mode 100644 index 0000000000..9abceb572a --- /dev/null +++ b/examples/simple_regional_with_networking/outputs.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." + value = module.gke.service_account +} + +output "network_name" { + description = "The name of the VPC being created" + value = module.gcp-network.network_name +} +output "subnet_names" { + description = "The name of the subnet being created" + value = module.gcp-network.subnets_names +} +output "subnet_secondary_ranges" { + description = "The secondary ranges associated with the subnet" + value = flatten(module.gcp-network.subnets_secondary_ranges) +} + diff --git a/examples/simple_regional_with_networking/variables.tf b/examples/simple_regional_with_networking/variables.tf new file mode 100644 index 0000000000..d78a9fbdb7 --- /dev/null +++ b/examples/simple_regional_with_networking/variables.tf @@ -0,0 +1,49 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "network_name" { + description = "The VPC network created to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork created to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for pods" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} + From 1668b646b7993745cac49b62997cccec70a203c0 Mon Sep 17 00:00:00 2001 From: omazin Date: Fri, 18 Oct 2019 08:29:57 +0300 Subject: [PATCH 39/82] [wait-for-cluster.sh] Update the script in all submodules. Follow up #284. --- .../scripts/wait-for-cluster.sh | 6 +++++- modules/beta-private-cluster/scripts/wait-for-cluster.sh | 6 +++++- modules/beta-public-cluster/scripts/wait-for-cluster.sh | 6 +++++- .../scripts/wait-for-cluster.sh | 6 +++++- modules/private-cluster/scripts/wait-for-cluster.sh | 6 +++++- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/beta-private-cluster/scripts/wait-for-cluster.sh b/modules/beta-private-cluster/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/modules/beta-private-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/beta-public-cluster/scripts/wait-for-cluster.sh b/modules/beta-public-cluster/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/modules/beta-public-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-public-cluster/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/private-cluster/scripts/wait-for-cluster.sh b/modules/private-cluster/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/modules/private-cluster/scripts/wait-for-cluster.sh +++ b/modules/private-cluster/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" From af8f1b858ed1e7e21cf3fe32efef219cb6ab81ab Mon Sep 17 00:00:00 2001 From: omazin Date: Fri, 18 Oct 2019 12:50:52 +0300 Subject: [PATCH 40/82] [wait-for-cluster.sh] Update the script in autogen folder. --- autogen/scripts/wait-for-cluster.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/autogen/scripts/wait-for-cluster.sh b/autogen/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/autogen/scripts/wait-for-cluster.sh +++ b/autogen/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" From 9c6627362467fa4dffd5a29fbafca885f4300d53 Mon Sep 17 00:00:00 2001 From: pp Date: Tue, 8 Oct 2019 20:03:03 +0300 Subject: [PATCH 41/82] Added variable `skip_provisioners` to skip 'local-exec' * Fix #258 * Added test `simple_regional_skip_local_exec` * Remove old upgrading guide from README's --- README.md | 17 +++++++++++++++++ autogen/cluster.tf | 1 + autogen/dns.tf | 2 +- autogen/variables.tf | 5 +++++ cluster.tf | 1 + dns.tf | 2 +- examples/simple_regional/README.md | 1 + examples/simple_regional/main.tf | 1 + examples/simple_regional/variables.tf | 5 +++++ modules/beta-private-cluster/README.md | 1 + modules/beta-private-cluster/cluster.tf | 1 + modules/beta-private-cluster/dns.tf | 2 +- 
modules/beta-private-cluster/variables.tf | 5 +++++ modules/beta-public-cluster/README.md | 1 + modules/beta-public-cluster/cluster.tf | 1 + modules/beta-public-cluster/dns.tf | 2 +- modules/beta-public-cluster/variables.tf | 5 +++++ modules/private-cluster/README.md | 1 + modules/private-cluster/cluster.tf | 1 + modules/private-cluster/dns.tf | 2 +- modules/private-cluster/variables.tf | 5 +++++ test/fixtures/simple_regional/example.tf | 1 + variables.tf | 5 +++++ 23 files changed, 63 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 50e2afd63b..ac4ff4174d 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,22 @@ Then perform the following commands on the root folder: - `terraform apply` to apply the infrastructure build - `terraform destroy` to destroy the built infrastructure +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. 
+ ## Inputs @@ -153,6 +169,7 @@ Then perform the following commands on the root folder: | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 4e5fd74d55..95ca6a31b6 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -352,6 +352,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/autogen/dns.tf b/autogen/dns.tf index d9d4a35395..731e010b0d 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 
1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/autogen/variables.tf b/autogen/variables.tf index 0fedacb2af..b3137c6dcb 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -304,6 +304,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} {% if private_cluster %} variable "deploy_using_private_endpoint" { diff --git a/cluster.tf b/cluster.tf index ffdb27b0fc..40b5559703 100644 --- a/cluster.tf +++ b/cluster.tf @@ -227,6 +227,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/dns.tf b/dns.tf index b240a23e65..f490c15504 100644 --- a/dns.tf +++ b/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/examples/simple_regional/README.md b/examples/simple_regional/README.md index fb209e47b5..1f0a187b6b 100644 --- a/examples/simple_regional/README.md +++ b/examples/simple_regional/README.md @@ -14,6 +14,7 @@ This example illustrates how to create a simple cluster. | network | The VPC network to host the cluster in | string | n/a | yes | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | +| skip\_provisioners | Flag to skip local-exec provisioners | bool | `"false"` | no | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | ## Outputs diff --git a/examples/simple_regional/main.tf b/examples/simple_regional/main.tf index 4662435fbd..353ae91906 100644 --- a/examples/simple_regional/main.tf +++ b/examples/simple_regional/main.tf @@ -35,6 +35,7 @@ module "gke" { ip_range_services = var.ip_range_services create_service_account = false service_account = var.compute_engine_service_account + skip_provisioners = var.skip_provisioners } data "google_client_config" "default" { diff --git a/examples/simple_regional/variables.tf b/examples/simple_regional/variables.tf index 6121eab9ea..e7405d9e21 100644 --- a/examples/simple_regional/variables.tf +++ b/examples/simple_regional/variables.tf @@ -47,3 +47,8 @@ variable "compute_engine_service_account" { description = "Service account to associate to the nodes in the cluster" } +variable "skip_provisioners" { + type = bool + description = "Flag to skip local-exec provisioners" + default = false +} diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 988d48ead8..242b46a114 100644 
--- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -194,6 +194,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index c481c69a35..bb19940d1f 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -328,6 +328,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index b240a23e65..f490c15504 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 9a869a830f..b6cd988fa8 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -302,6 +302,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} variable "deploy_using_private_endpoint" { type = bool diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 7d59e927bf..1bd0d313d2 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -185,6 +185,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. 
If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index a264e932b9..e2e46ac862 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -323,6 +323,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index b240a23e65..f490c15504 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 0ae2b75661..fda9068e96 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -302,6 +302,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} variable "istio" { description = "(Beta) Enable Istio addon" diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index d823f640fa..f2d8f895ff 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -178,6 +178,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) 
| bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 412e8295ed..ab8b275ab1 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -232,6 +232,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index b240a23e65..f490c15504 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 8008e08975..44544f57b4 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -292,6 +292,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} variable "deploy_using_private_endpoint" { type = bool diff --git a/test/fixtures/simple_regional/example.tf b/test/fixtures/simple_regional/example.tf index a03fadb28b..7f8bb83637 100644 --- a/test/fixtures/simple_regional/example.tf +++ b/test/fixtures/simple_regional/example.tf @@ -25,4 +25,5 @@ module "example" { ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name compute_engine_service_account = var.compute_engine_service_account + skip_provisioners = true } diff --git a/variables.tf b/variables.tf index 460bdeaeff..c7a133a1e1 100644 --- a/variables.tf +++ b/variables.tf @@ -292,3 +292,8 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." 
+ default = false +} From a0f5f7b9b48bc9161a18b148c0d59b5e441f23b2 Mon Sep 17 00:00:00 2001 From: pp Date: Fri, 18 Oct 2019 14:42:34 +0300 Subject: [PATCH 42/82] Moved sanbox_config to node_pool resource (Fix #240) --- autogen/cluster.tf | 16 ++++++++-------- .../cluster.tf | 16 ++++++++-------- modules/beta-private-cluster/cluster.tf | 16 ++++++++-------- modules/beta-public-cluster/cluster.tf | 16 ++++++++-------- 4 files changed, 32 insertions(+), 32 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 296b2818df..035eeb0cf7 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -167,14 +167,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } {% endif %} } } @@ -415,6 +407,14 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } {% endif %} } diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index cf1def945d..2348150d19 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -158,14 +158,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } } } @@ -386,6 +378,14 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } 
} lifecycle { diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index c481c69a35..56f40ed17a 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -158,14 +158,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } } } @@ -314,6 +306,14 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } } lifecycle { diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index a264e932b9..e37b2b3b31 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -158,14 +158,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } } } @@ -309,6 +301,14 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } } lifecycle { From 6a214aad3939e3904e5ca504deb414f8cb93dd19 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Fri, 18 Oct 2019 11:21:58 -0500 Subject: [PATCH 43/82] fix docs --- examples/simple_regional_with_networking/README.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md index a58378f9ae..376602c232 100644 --- 
a/examples/simple_regional_with_networking/README.md +++ b/examples/simple_regional_with_networking/README.md @@ -22,21 +22,11 @@ This example illustrates how to create a VPC and a simple cluster. |------|-------------| | ca\_certificate | | | client\_token | | -| cluster\_name | Cluster name | -| ip\_range\_pods | The secondary IP range used for pods | -| ip\_range\_services | The secondary IP range used for services | | kubernetes\_endpoint | | -| location | | -| master\_kubernetes\_version | The master Kubernetes version | -| network | | | network\_name | The name of the VPC being created | -| project\_id | | -| region | | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | subnet\_names | The name of the subnet being created | | subnet\_secondary\_ranges | The secondary ranges associated with the subnet | -| subnetwork | | -| zones | List of zones in which the cluster resides | From 9983d8dc19eab8c6364a7933a7b91f4aa8dfbe25 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Fri, 18 Oct 2019 17:50:30 -0400 Subject: [PATCH 44/82] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c645b5daf..c78c043f18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Extending the adopted spec, each change should have a link to its corresponding ### Added * Added [private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) and [beta private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) variants which allow node pools to be created before being destroyed. [#256] +* Add a parameter `registry_project_id` to allow connecting to registries in other projects. [#273] ## [v5.0.0] - 2019-09-25 v5.0.0 is a backwards-incompatible release. 
Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). @@ -204,6 +205,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#273]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/273 [#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 [#256]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/256 [#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 From 312b65ad49189d06f13aa84de4c4423353aac21b Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Sat, 19 Oct 2019 17:55:15 -0500 Subject: [PATCH 45/82] added tests --- .../simple_regional_with_networking/main.tf | 9 +- .../outputs.tf | 5 +- .../test_outputs.tf | 1 + .../variables.tf | 2 +- .../example.tf | 24 +++ .../outputs.tf | 64 ++++++ .../variables.tf | 35 +++ .../controls/gcloud.rb | 202 ++++++++++++++++++ .../inspec.yml | 30 +++ 9 files changed, 365 insertions(+), 7 deletions(-) create mode 120000 examples/simple_regional_with_networking/test_outputs.tf create mode 100644 test/fixtures/simple_regional_with_networking/example.tf create mode 100644 test/fixtures/simple_regional_with_networking/outputs.tf create mode 100644 test/fixtures/simple_regional_with_networking/variables.tf create mode 100644 test/integration/simple_regional_with_networking/controls/gcloud.rb create mode 100644 test/integration/simple_regional_with_networking/inspec.yml diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf index f780395645..1f8186e853 100644 --- a/examples/simple_regional_with_networking/main.tf +++ b/examples/simple_regional_with_networking/main.tf @@ 
-25,8 +25,9 @@ provider "google" { module "gcp-network" { source = "terraform-google-modules/network/google" + version = "~> 1.4.0" project_id = var.project_id - network_name = var.network_name + network_name = var.network subnets = [ { @@ -51,13 +52,13 @@ module "gcp-network" { } module "gke" { - source = "terraform-google-modules/kubernetes-engine/google" + source = "../../" project_id = var.project_id name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" regional = true region = var.region - network = var.network_name - subnetwork = var.subnetwork + network = module.gcp-network.network_name + subnetwork = module.gcp-network.subnets_names[0] ip_range_pods = var.ip_range_pods ip_range_services = var.ip_range_services create_service_account = false diff --git a/examples/simple_regional_with_networking/outputs.tf b/examples/simple_regional_with_networking/outputs.tf index 9abceb572a..6cf2ab5eab 100644 --- a/examples/simple_regional_with_networking/outputs.tf +++ b/examples/simple_regional_with_networking/outputs.tf @@ -37,12 +37,13 @@ output "network_name" { description = "The name of the VPC being created" value = module.gcp-network.network_name } + output "subnet_names" { description = "The name of the subnet being created" value = module.gcp-network.subnets_names } + output "subnet_secondary_ranges" { description = "The secondary ranges associated with the subnet" - value = flatten(module.gcp-network.subnets_secondary_ranges) + value = module.gcp-network.subnets_secondary_ranges } - diff --git a/examples/simple_regional_with_networking/test_outputs.tf b/examples/simple_regional_with_networking/test_outputs.tf new file mode 120000 index 0000000000..17b34213ba --- /dev/null +++ b/examples/simple_regional_with_networking/test_outputs.tf @@ -0,0 +1 @@ +../../test/fixtures/all_examples/test_outputs.tf \ No newline at end of file diff --git a/examples/simple_regional_with_networking/variables.tf b/examples/simple_regional_with_networking/variables.tf index 
d78a9fbdb7..5c36a81c16 100644 --- a/examples/simple_regional_with_networking/variables.tf +++ b/examples/simple_regional_with_networking/variables.tf @@ -27,7 +27,7 @@ variable "region" { description = "The region to host the cluster in" } -variable "network_name" { +variable "network" { description = "The VPC network created to host the cluster in" } diff --git a/test/fixtures/simple_regional_with_networking/example.tf b/test/fixtures/simple_regional_with_networking/example.tf new file mode 100644 index 0000000000..4322474b35 --- /dev/null +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -0,0 +1,24 @@ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} +locals { + network = "gke-network-${random_string.suffix.result}" + subnetwork = "gke-subnetwork-${random_string.suffix.result}" + ip_range_pods="gke-ip-range-pods-${random_string.suffix.result}" + ip_range_services="gke-ip-range-svc-${random_string.suffix.result}" +} +module "example" { + source = "../../../examples/simple_regional_with_networking" + + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + network = local.network + subnetwork = local.subnetwork + ip_range_pods = local.ip_range_pods + ip_range_services = local.ip_range_services + compute_engine_service_account = var.compute_engine_service_account +} \ No newline at end of file diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf new file mode 100644 index 0000000000..32955a3206 --- /dev/null +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -0,0 +1,64 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +output "project_id" { + value = var.project_id +} + +output "location" { + value = module.example.location +} + +output "cluster_name" { + description = "Cluster name" + value = module.example.cluster_name +} + +output "kubernetes_endpoint" { + sensitive = true + value = module.example.kubernetes_endpoint +} + +output "client_token" { + sensitive = true + value = module.example.client_token +} + +output "ca_certificate" { + value = module.example.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." + value = module.example.service_account +} + +output "network_name" { + description = "The name of the VPC being created" + value = local.network +} + +output "subnet_name" { + description = "The name of the subnet being created" + value = local.subnetwork +} + +output "region" { + description = "The region the cluster is hosted in" + value = module.example.region +} + diff --git a/test/fixtures/simple_regional_with_networking/variables.tf b/test/fixtures/simple_regional_with_networking/variables.tf new file mode 100644 index 0000000000..bc35e84fee --- /dev/null +++ b/test/fixtures/simple_regional_with_networking/variables.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" + default = "us-east4" +} + + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} + diff --git a/test/integration/simple_regional_with_networking/controls/gcloud.rb b/test/integration/simple_regional_with_networking/controls/gcloud.rb new file mode 100644 index 0000000000..939780ff18 --- /dev/null +++ b/test/integration/simple_regional_with_networking/controls/gcloud.rb @@ -0,0 +1,202 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project_id = attribute('project_id') +location = attribute('location') +cluster_name = attribute('cluster_name') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') +region = attribute('region') + +control "gcloud" do + title "Google Compute Engine GKE configuration" + describe command("gcloud --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + + describe "cluster" do + it "is running" do + expect(data['status']).to eq 'RUNNING' + end + + it "is regional" do + expect(data['location']).to match(/^.*[1-9]$/) + end + + it "uses public nodes and master endpoint" do + expect(data['privateClusterConfig']).to eq nil + end + + it "has the expected addon settings" do + expect(data['addonsConfig']).to eq({ + "horizontalPodAutoscaling" => {}, + "httpLoadBalancing" => {}, + "kubernetesDashboard" => { + "disabled" => true, + }, + "networkPolicyConfig" => { + "disabled" => true, + }, + }) + end + end + + describe "default node pool" do + let(:default_node_pool) { data['nodePools'].select { |p| p['name'] == "default-pool" }.first } + + it "exists" do + expect(data['nodePools']).to include( + including( + "name" => "default-pool", + ) + ) + end + end + + describe "node pool" do + let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } + + it "has autoscaling enabled" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "enabled" => true, + ), + ) + ) + end + + it "has the expected minimum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "minNodeCount" => 1, + ), + ) + ) + end + + it "has the expected maximum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "maxNodeCount" => 100, + ), + ) 
+ ) + end + + it "is the expected machine type" do + expect(node_pools).to include( + including( + "config" => including( + "machineType" => "n1-standard-2", + ), + ) + ) + end + + it "has the expected disk size" do + expect(node_pools).to include( + including( + "config" => including( + "diskSizeGb" => 100, + ), + ) + ) + end + + it "has the expected labels" do + expect(node_pools).to include( + including( + "config" => including( + "labels" => including( + "cluster_name" => cluster_name, + "node_pool" => "default-node-pool", + ), + ), + ) + ) + end + + it "has the expected network tags" do + expect(node_pools).to include( + including( + "config" => including( + "tags" => match_array([ + "gke-#{cluster_name}", + "gke-#{cluster_name}-default-node-pool", + ]), + ), + ) + ) + end + + it "has autorepair enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoRepair" => true, + ), + ) + ) + end + + it "has autoupgrade enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoUpgrade" => true, + ), + ) + ) + end + end + end +end + +control "network" do + title "gcp network configuration" + + describe google_compute_network( + project: project_id, + name: network_name + ) do + it { should exist } + end +end +control "subnetwork" do + title "gcp subnetwork configuration" + + describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + end +end \ No newline at end of file diff --git a/test/integration/simple_regional_with_networking/inspec.yml b/test/integration/simple_regional_with_networking/inspec.yml new file mode 100644 index 0000000000..9f70b7327c --- /dev/null +++ b/test/integration/simple_regional_with_networking/inspec.yml @@ -0,0 +1,30 @@ +name: 
simple_regional_with_networking +depends: + - name: inspec-gcp + git: https://github.com/inspec/inspec-gcp.git + tag: v0.11.0 +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: client_token + required: true + type: string + - name: network_name + required: true + type: string + - name: subnet_name + required: true + type: string + - name: region + required: true + type: string From 65d9e25b17e47c2b883e81bd8c3e23adac455f50 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Sat, 19 Oct 2019 18:25:51 -0500 Subject: [PATCH 46/82] add test suite --- .kitchen.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.kitchen.yml b/.kitchen.yml index 9f5df5a03e..89dc11d0d3 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -68,6 +68,13 @@ suites: systems: - name: simple_regional backend: local + - name: "simple_regional_with_networking" + driver: + root_module_directory: test/fixtures/simple_regional_with_networking + verifier: + systems: + - name: simple_regional_with_networking + backend: local - name: "simple_regional_private" driver: root_module_directory: test/fixtures/simple_regional_private From 9413187aad59b113b6404f5ee307eb0c548315fb Mon Sep 17 00:00:00 2001 From: Bharath Baiju Date: Sat, 19 Oct 2019 19:36:50 -0500 Subject: [PATCH 47/82] fix inspec gcp tests --- .kitchen.yml | 10 ++++ .../example.tf | 14 ++++++ .../outputs.tf | 9 ++++ .../controls/gcloud.rb | 30 ------------ .../controls/network.rb | 32 ++++++++++++ .../controls/subnet.rb | 49 +++++++++++++++++++ .../inspec.yml | 8 ++- 7 files changed, 121 insertions(+), 31 deletions(-) create mode 100644 test/integration/simple_regional_with_networking/controls/network.rb create mode 100644 test/integration/simple_regional_with_networking/controls/subnet.rb diff --git a/.kitchen.yml b/.kitchen.yml index 89dc11d0d3..7b08045314 
100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -75,6 +75,16 @@ suites: systems: - name: simple_regional_with_networking backend: local + controls: + - gcloud + - name: subnet + backend: local + controls: + - subnet + - name: network + backend: gcp + controls: + - network - name: "simple_regional_private" driver: root_module_directory: test/fixtures/simple_regional_private diff --git a/test/fixtures/simple_regional_with_networking/example.tf b/test/fixtures/simple_regional_with_networking/example.tf index 4322474b35..d0d6b3b91d 100644 --- a/test/fixtures/simple_regional_with_networking/example.tf +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -1,4 +1,18 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ resource "random_string" "suffix" { length = 4 special = false diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf index 32955a3206..31da62d519 100644 --- a/test/fixtures/simple_regional_with_networking/outputs.tf +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -62,3 +62,12 @@ output "region" { value = module.example.region } +output "ip_range_pods_name" { + description = "The secondary range name for pods" + value = local.ip_range_pods +} + +output "ip_range_services_name" { + description = "The secondary range name for services" + value = local.ip_range_services +} \ No newline at end of file diff --git a/test/integration/simple_regional_with_networking/controls/gcloud.rb b/test/integration/simple_regional_with_networking/controls/gcloud.rb index 939780ff18..e6152ec2a0 100644 --- a/test/integration/simple_regional_with_networking/controls/gcloud.rb +++ b/test/integration/simple_regional_with_networking/controls/gcloud.rb @@ -15,9 +15,6 @@ project_id = attribute('project_id') location = attribute('location') cluster_name = attribute('cluster_name') -network_name = attribute('network_name') -subnet_name = attribute('subnet_name') -region = attribute('region') control "gcloud" do title "Google Compute Engine GKE configuration" @@ -172,31 +169,4 @@ end end end -end - -control "network" do - title "gcp network configuration" - - describe google_compute_network( - project: project_id, - name: network_name - ) do - it { should exist } - end -end -control "subnetwork" do - title "gcp subnetwork configuration" - - describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - end end \ No newline at end of file diff --git 
a/test/integration/simple_regional_with_networking/controls/network.rb b/test/integration/simple_regional_with_networking/controls/network.rb new file mode 100644 index 0000000000..db356efe28 --- /dev/null +++ b/test/integration/simple_regional_with_networking/controls/network.rb @@ -0,0 +1,32 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project_id = attribute('project_id') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') + +control "network" do + title "gcp network configuration" + + describe google_compute_network( + project: project_id, + name: network_name + ) do + it { should exist } + its ('subnetworks.count') { should eq 1 } + its ('subnetworks.first') { should match subnet_name} + end + end + + \ No newline at end of file diff --git a/test/integration/simple_regional_with_networking/controls/subnet.rb b/test/integration/simple_regional_with_networking/controls/subnet.rb new file mode 100644 index 0000000000..f1800e3654 --- /dev/null +++ b/test/integration/simple_regional_with_networking/controls/subnet.rb @@ -0,0 +1,49 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project_id = attribute('project_id') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') +region = attribute('region') +ip_range_pods_name = attribute('ip_range_pods_name') +ip_range_services_name = attribute('ip_range_services_name') + +control "subnet" do + title "gcp subnetwork configuration" + + describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + it "#should have the correct secondaryIpRanges configuration for #{ip_range_pods_name}" do + expect(data["secondaryIpRanges"][0]).to include( + "rangeName" => ip_range_pods_name, + "ipCidrRange" => "192.168.0.0/18" + ) + end + it "#should have the correct secondaryIpRanges configuration for #{ip_range_services_name}" do + expect(data["secondaryIpRanges"][1]).to include( + "rangeName" => ip_range_services_name, + "ipCidrRange" => "192.168.64.0/18" + ) + end + end + end \ No newline at end of file diff --git a/test/integration/simple_regional_with_networking/inspec.yml b/test/integration/simple_regional_with_networking/inspec.yml index 9f70b7327c..bf2e4e86aa 100644 --- a/test/integration/simple_regional_with_networking/inspec.yml +++ b/test/integration/simple_regional_with_networking/inspec.yml @@ -2,7 +2,7 @@ name: simple_regional_with_networking depends: - name: inspec-gcp git: https://github.com/inspec/inspec-gcp.git - tag: 
v0.11.0 + tag: v0.10.0 attributes: - name: project_id required: true @@ -28,3 +28,9 @@ attributes: - name: region required: true type: string + - name: ip_range_pods_name + required: true + type: string + - name: ip_range_services_name + required: true + type: string From c4f138df9cb571355742480716b2a022a329fd3b Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Sat, 19 Oct 2019 20:14:51 -0500 Subject: [PATCH 48/82] fix lint --- .../simple_regional_with_networking/README.md | 12 +++++- .../simple_regional_with_networking/main.tf | 2 +- .../example.tf | 39 ++++++++++--------- .../outputs.tf | 2 +- .../variables.tf | 2 +- .../controls/gcloud.rb | 2 +- .../controls/network.rb | 6 +-- .../controls/subnet.rb | 5 +-- 8 files changed, 37 insertions(+), 33 deletions(-) diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md index 376602c232..f39675063d 100644 --- a/examples/simple_regional_with_networking/README.md +++ b/examples/simple_regional_with_networking/README.md @@ -11,7 +11,7 @@ This example illustrates how to create a VPC and a simple cluster. | compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | | ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | | ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | -| network\_name | The VPC network created to host the cluster in | string | n/a | yes | +| network | The VPC network created to host the cluster in | string | n/a | yes | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | | subnetwork | The subnetwork created to host the cluster in | string | n/a | yes | @@ -22,11 +22,21 @@ This example illustrates how to create a VPC and a simple cluster. 
|------|-------------| | ca\_certificate | | | client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | | kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | | network\_name | The name of the VPC being created | +| project\_id | | +| region | | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | subnet\_names | The name of the subnet being created | | subnet\_secondary\_ranges | The secondary ranges associated with the subnet | +| subnetwork | | +| zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf index 1f8186e853..ebb4ddb290 100644 --- a/examples/simple_regional_with_networking/main.tf +++ b/examples/simple_regional_with_networking/main.tf @@ -25,7 +25,7 @@ provider "google" { module "gcp-network" { source = "terraform-google-modules/network/google" - version = "~> 1.4.0" + version = "~> 1.4.0" project_id = var.project_id network_name = var.network diff --git a/test/fixtures/simple_regional_with_networking/example.tf b/test/fixtures/simple_regional_with_networking/example.tf index d0d6b3b91d..23caab9b52 100644 --- a/test/fixtures/simple_regional_with_networking/example.tf +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -1,17 +1,18 @@ - -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ resource "random_string" "suffix" { length = 4 @@ -19,10 +20,10 @@ resource "random_string" "suffix" { upper = false } locals { - network = "gke-network-${random_string.suffix.result}" - subnetwork = "gke-subnetwork-${random_string.suffix.result}" - ip_range_pods="gke-ip-range-pods-${random_string.suffix.result}" - ip_range_services="gke-ip-range-svc-${random_string.suffix.result}" + network = "gke-network-${random_string.suffix.result}" + subnetwork = "gke-subnetwork-${random_string.suffix.result}" + ip_range_pods = "gke-ip-range-pods-${random_string.suffix.result}" + ip_range_services = "gke-ip-range-svc-${random_string.suffix.result}" } module "example" { source = "../../../examples/simple_regional_with_networking" @@ -35,4 +36,4 @@ module "example" { ip_range_pods = local.ip_range_pods ip_range_services = local.ip_range_services compute_engine_service_account = var.compute_engine_service_account -} \ No newline at end of file +} diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf index 31da62d519..ff11179f95 100644 --- a/test/fixtures/simple_regional_with_networking/outputs.tf +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -70,4 +70,4 @@ output "ip_range_pods_name" { output "ip_range_services_name" { description = "The secondary range name for services" value = local.ip_range_services -} \ No newline at end of file +} diff --git a/test/fixtures/simple_regional_with_networking/variables.tf b/test/fixtures/simple_regional_with_networking/variables.tf index bc35e84fee..5b2187af58 100644 --- a/test/fixtures/simple_regional_with_networking/variables.tf +++ b/test/fixtures/simple_regional_with_networking/variables.tf @@ -25,7 +25,7 @@ variable "cluster_name_suffix" { variable "region" { description = "The region to host the cluster in" - default = "us-east4" + default = "us-east4" } diff --git a/test/integration/simple_regional_with_networking/controls/gcloud.rb 
b/test/integration/simple_regional_with_networking/controls/gcloud.rb index e6152ec2a0..e6bbcfc047 100644 --- a/test/integration/simple_regional_with_networking/controls/gcloud.rb +++ b/test/integration/simple_regional_with_networking/controls/gcloud.rb @@ -169,4 +169,4 @@ end end end -end \ No newline at end of file +end diff --git a/test/integration/simple_regional_with_networking/controls/network.rb b/test/integration/simple_regional_with_networking/controls/network.rb index db356efe28..a17ce74663 100644 --- a/test/integration/simple_regional_with_networking/controls/network.rb +++ b/test/integration/simple_regional_with_networking/controls/network.rb @@ -15,18 +15,14 @@ project_id = attribute('project_id') network_name = attribute('network_name') subnet_name = attribute('subnet_name') - control "network" do title "gcp network configuration" - describe google_compute_network( project: project_id, name: network_name ) do it { should exist } its ('subnetworks.count') { should eq 1 } - its ('subnetworks.first') { should match subnet_name} + its ('subnetworks.first') { should match subnet_name } end end - - \ No newline at end of file diff --git a/test/integration/simple_regional_with_networking/controls/subnet.rb b/test/integration/simple_regional_with_networking/controls/subnet.rb index f1800e3654..f88d46355b 100644 --- a/test/integration/simple_regional_with_networking/controls/subnet.rb +++ b/test/integration/simple_regional_with_networking/controls/subnet.rb @@ -18,14 +18,11 @@ region = attribute('region') ip_range_pods_name = attribute('ip_range_pods_name') ip_range_services_name = attribute('ip_range_services_name') - control "subnet" do title "gcp subnetwork configuration" - describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do its(:exit_status) { should eq 0 } its(:stderr) { should eq '' } - let(:data) do if subject.exit_status == 0 JSON.parse(subject.stdout) @@ -46,4 +43,4 @@ 
) end end - end \ No newline at end of file + end From ccec659cee290b6bcbb05943a808e07caec86c92 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Thu, 17 Oct 2019 17:48:29 +0800 Subject: [PATCH 49/82] Release Channels --- autogen/cluster.tf | 10 ++++++++++ autogen/main.tf | 4 ++++ autogen/outputs.tf | 9 +++++++++ autogen/variables.tf | 11 +++++++++++ 4 files changed, 34 insertions(+) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 296b2818df..3b9b0fac08 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -45,6 +45,16 @@ resource "google_container_cluster" "primary" { } } +{% if beta_cluster %} + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } +{% endif %} + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/autogen/main.tf b/autogen/main.tf index afbd7bf8c1..9ed92d9741 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -48,6 +48,10 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal +{% if beta_cluster %} + release_channel = var.enable_release_channel ? 
[{ channel : var.release_channel }] : [] +{% endif %} + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/autogen/outputs.tf b/autogen/outputs.tf index ff8eab1bef..2f3e73fbb0 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -150,4 +150,13 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel_enabled" { + description = "Whether release channel is enabled" + value = var.enable_release_channel +} + +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} {% endif %} diff --git a/autogen/variables.tf b/autogen/variables.tf index af446afff8..981c18d672 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -417,4 +417,15 @@ variable "authenticator_security_group" { default = null } +variable "enable_release_channel" { + type = bool + description = "(Beta) Whether release channel is configured for this cluster." + default = false +} + +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." 
+ default = "UNSPECIFIED" +} {% endif %} From 5d829c9e3cf38ca0fa2329999ed470e93fcd371c Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Thu, 17 Oct 2019 17:54:08 +0800 Subject: [PATCH 50/82] Generate from template --- cluster.tf | 1 + main.tf | 1 + .../beta-private-cluster-update-variant/README.md | 6 +++++- .../beta-private-cluster-update-variant/cluster.tf | 8 ++++++++ .../beta-private-cluster-update-variant/main.tf | 5 ++++- .../networks.tf | 2 +- .../beta-private-cluster-update-variant/outputs.tf | 9 +++++++++ .../variables.tf | 14 +++++++++++++- modules/beta-private-cluster/README.md | 3 +++ modules/beta-private-cluster/cluster.tf | 8 ++++++++ modules/beta-private-cluster/main.tf | 2 ++ modules/beta-private-cluster/outputs.tf | 9 +++++++++ modules/beta-private-cluster/variables.tf | 11 +++++++++++ modules/beta-public-cluster/README.md | 3 +++ modules/beta-public-cluster/cluster.tf | 8 ++++++++ modules/beta-public-cluster/main.tf | 2 ++ modules/beta-public-cluster/outputs.tf | 9 +++++++++ modules/beta-public-cluster/variables.tf | 11 +++++++++++ modules/private-cluster-update-variant/README.md | 2 +- modules/private-cluster-update-variant/cluster.tf | 1 + modules/private-cluster-update-variant/main.tf | 4 +++- modules/private-cluster-update-variant/networks.tf | 2 +- .../private-cluster-update-variant/variables.tf | 3 ++- modules/private-cluster/cluster.tf | 1 + modules/private-cluster/main.tf | 1 + 25 files changed, 118 insertions(+), 8 deletions(-) diff --git a/cluster.tf b/cluster.tf index ffdb27b0fc..c9519113a8 100644 --- a/cluster.tf +++ b/cluster.tf @@ -41,6 +41,7 @@ resource "google_container_cluster" "primary" { } } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/main.tf b/main.tf index a9e1c15810..1090227fd8 100644 --- a/main.tf +++ b/main.tf @@ -45,6 +45,7 @@ locals { master_version = var.regional ? 
local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 0fc0068e96..00bd72d7a2 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -153,6 +153,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_release\_channel | (Beta) Whether release channel is configured for this cluster. | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -188,8 +189,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | @@ -223,6 +225,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_versions | List of node pools versions | | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | +| release\_channel | The release channel of this cluster | +| release\_channel\_enabled | Whether release channel is enabled | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index cf1def945d..7960a90124 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -41,6 +41,14 @@ resource "google_container_cluster" "primary" { } } + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index fc38644871..760209d6df 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version @@ -43,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? 
local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.enable_release_channel ? [{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-private-cluster-update-variant/networks.tf b/modules/beta-private-cluster-update-variant/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-private-cluster-update-variant/networks.tf +++ b/modules/beta-private-cluster-update-variant/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-private-cluster-update-variant/outputs.tf b/modules/beta-private-cluster-update-variant/outputs.tf index 4153960069..31a22b2175 100644 --- a/modules/beta-private-cluster-update-variant/outputs.tf +++ b/modules/beta-private-cluster-update-variant/outputs.tf @@ -149,3 +149,12 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel_enabled" { + description = "Whether release channel is enabled" + value = var.enable_release_channel +} + +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 9a869a830f..90b01e30c5 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal 
cluster / required if regional)" + default = null } variable "zones" { @@ -405,3 +406,14 @@ variable "authenticator_security_group" { default = null } +variable "enable_release_channel" { + type = bool + description = "(Beta) Whether release channel is configured for this cluster." + default = false +} + +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." + default = "UNSPECIFIED" +} diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 425ef67fa1..341013c059 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -153,6 +153,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_release\_channel | (Beta) Whether release channel is configured for this cluster. | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. 
| bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -224,6 +225,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_versions | List of node pools versions | | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | +| release\_channel | The release channel of this cluster | +| release\_channel\_enabled | Whether release channel is enabled | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index c481c69a35..a3aa0b12ef 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -41,6 +41,14 @@ resource "google_container_cluster" "primary" { } } + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 63bf31ac78..760209d6df 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -44,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.enable_release_channel ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index 4153960069..31a22b2175 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -149,3 +149,12 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel_enabled" { + description = "Whether release channel is enabled" + value = var.enable_release_channel +} + +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index ee2d5b5556..2c9dfb1e4d 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -412,3 +412,14 @@ variable "authenticator_security_group" { default = null } +variable "enable_release_channel" { + type = bool + description = "(Beta) Whether release channel is configured for this cluster." + default = false +} + +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." + default = "UNSPECIFIED" +} diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 73cf4ea4c5..399e583d6c 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -145,6 +145,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. 
| bool | `"true"` | no | | enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_release\_channel | (Beta) Whether release channel is configured for this cluster. | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -215,6 +216,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_versions | List of node pools versions | | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | +| release\_channel | The release channel of this cluster | +| release\_channel\_enabled | Whether release channel is enabled | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index a264e932b9..fd37709fd0 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -41,6 +41,14 @@ resource "google_container_cluster" "primary" { } } + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index f0477f9498..ec0afeda3b 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -44,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.enable_release_channel ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index 4153960069..31a22b2175 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -149,3 +149,12 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel_enabled" { + description = "Whether release channel is enabled" + value = var.enable_release_channel +} + +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 90008d54bd..f004155988 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -388,3 +388,14 @@ variable "authenticator_security_group" { default = null } +variable "enable_release_channel" { + type = bool + description = "(Beta) Whether release channel is configured for this cluster." + default = false +} + +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." + default = "UNSPECIFIED" +} diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index e817361124..8b0a140c4c 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -174,7 +174,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. 
Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index e8db91a77a..c4342d0d8f 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -41,6 +41,7 @@ resource "google_container_cluster" "primary" { } } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index bfe746401c..aba5e2d79f 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version @@ -44,6 +45,7 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? 
var.network_project_id : var.project_id diff --git a/modules/private-cluster-update-variant/networks.tf b/modules/private-cluster-update-variant/networks.tf index a382073dc0..aae034eee5 100644 --- a/modules/private-cluster-update-variant/networks.tf +++ b/modules/private-cluster-update-variant/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 8008e08975..28b744d868 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 412e8295ed..ee8f7d433f 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -41,6 +41,7 @@ resource "google_container_cluster" "primary" { } } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 2bd1c40d14..aba5e2d79f 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -45,6 +45,7 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? 
local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id From b63658ef98e8e1c5477fadbb18de0a836163b748 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Mon, 21 Oct 2019 10:10:06 +0800 Subject: [PATCH 51/82] Merge from master --- modules/beta-private-cluster-update-variant/README.md | 4 ++++ modules/beta-private-cluster-update-variant/sa.tf | 2 +- modules/beta-private-cluster-update-variant/variables.tf | 6 ++++++ modules/beta-private-cluster/README.md | 1 + modules/beta-public-cluster/README.md | 1 + modules/private-cluster-update-variant/README.md | 4 ++++ modules/private-cluster-update-variant/sa.tf | 2 +- modules/private-cluster-update-variant/variables.tf | 6 ++++++ scripts/wait-for-cluster.sh | 6 +----- 9 files changed, 25 insertions(+), 7 deletions(-) diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 00bd72d7a2..c2f9aeffd4 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -191,6 +191,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. 
| string | `""` | no | | release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | @@ -262,6 +263,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/beta-private-cluster-update-variant/sa.tf b/modules/beta-private-cluster-update-variant/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-private-cluster-update-variant/sa.tf +++ b/modules/beta-private-cluster-update-variant/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? 
var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 90b01e30c5..2c9dfb1e4d 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -268,6 +268,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 341013c059..29c5146687 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -192,6 +192,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. 
Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 399e583d6c..bfb6045521 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -183,6 +183,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. 
| string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 8b0a140c4c..e0ef130d3e 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -176,6 +176,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | @@ -237,6 +238,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/private-cluster-update-variant/sa.tf b/modules/private-cluster-update-variant/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/private-cluster-update-variant/sa.tf +++ b/modules/private-cluster-update-variant/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 28b744d868..a425c13d9a 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -258,6 +258,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. 
If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 37f0176ec7..6ff3253d58 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2019 Google LLC +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,10 +15,6 @@ set -e -if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} -fi - PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" From e90d20bac2a364ebdaec067273ec2c676790a977 Mon Sep 17 00:00:00 2001 From: pp Date: Mon, 21 Oct 2019 11:05:51 +0300 Subject: [PATCH 52/82] Created test for sandbox (Fix #252) --- .kitchen.yml | 7 ++ examples/simple_regional_beta/README.md | 11 +- examples/simple_regional_beta/main.tf | 35 +++--- examples/simple_regional_beta/test_outputs.tf | 4 - examples/simple_regional_beta/variables.tf | 33 +++++- test/fixtures/sandbox_enabled/example.tf | 40 +++++++ test/fixtures/sandbox_enabled/network.tf | 48 +++++++++ test/fixtures/sandbox_enabled/outputs.tf | 1 + test/fixtures/sandbox_enabled/variables.tf | 1 + .../sandbox_enabled/controls/gcloud.rb | 102 ++++++++++++++++++ test/integration/sandbox_enabled/inspec.yml | 17 +++ 11 files changed, 270 insertions(+), 29 deletions(-) create mode 100644 test/fixtures/sandbox_enabled/example.tf create mode 100644 test/fixtures/sandbox_enabled/network.tf create mode 120000 test/fixtures/sandbox_enabled/outputs.tf create mode 
120000 test/fixtures/sandbox_enabled/variables.tf create mode 100644 test/integration/sandbox_enabled/controls/gcloud.rb create mode 100644 test/integration/sandbox_enabled/inspec.yml diff --git a/.kitchen.yml b/.kitchen.yml index 9f5df5a03e..61b8aae029 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -131,3 +131,10 @@ suites: systems: - name: workload_metadata_config backend: local + - name: "sandbox_enabled" + driver: + root_module_directory: test/fixtures/sandbox_enabled + verifier: + systems: + - name: sandbox_enabled + backend: local diff --git a/examples/simple_regional_beta/README.md b/examples/simple_regional_beta/README.md index bd676115b9..02d0dba224 100644 --- a/examples/simple_regional_beta/README.md +++ b/examples/simple_regional_beta/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple cluster with beta features. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -11,13 +10,16 @@ This example illustrates how to create a simple cluster with beta features. 
| cloudrun | Boolean to enable / disable CloudRun | string | `"true"` | no | | cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | | compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | -| credentials\_path | The path to the GCP credentials JSON file | string | n/a | yes | | ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | | ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | | istio | Boolean to enable / disable Istio | string | `"true"` | no | | network | The VPC network to host the cluster in | string | n/a | yes | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | ## Outputs @@ -27,7 +29,6 @@ This example illustrates how to create a simple cluster with beta features. | ca\_certificate | | | client\_token | | | cluster\_name | Cluster name | -| credentials\_path | | | ip\_range\_pods | The secondary IP range used for pods | | ip\_range\_services | The secondary IP range used for services | | kubernetes\_endpoint | | @@ -40,7 +41,7 @@ This example illustrates how to create a simple cluster with beta features. 
| subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/simple_regional_beta/main.tf b/examples/simple_regional_beta/main.tf index fc95090ede..b75fdaa613 100644 --- a/examples/simple_regional_beta/main.tf +++ b/examples/simple_regional_beta/main.tf @@ -19,25 +19,28 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" - credentials = file(var.credentials_path) - region = var.region + version = "~> 2.12.0" + region = var.region } module "gke" { - source = "../../modules/beta-public-cluster/" - project_id = var.project_id - name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" - regional = true - region = var.region - network = var.network - subnetwork = var.subnetwork - ip_range_pods = var.ip_range_pods - ip_range_services = var.ip_range_services - create_service_account = false - service_account = var.compute_engine_service_account - istio = var.istio - cloudrun = var.cloudrun + source = "../../modules/beta-public-cluster/" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = true + region = var.region + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + istio = var.istio + cloudrun = var.cloudrun + node_metadata = var.node_metadata + sandbox_enabled = var.sandbox_enabled + remove_default_node_pool = var.remove_default_node_pool + node_pools = var.node_pools } data "google_client_config" "default" { diff --git a/examples/simple_regional_beta/test_outputs.tf b/examples/simple_regional_beta/test_outputs.tf index f250fef192..e64c40e477 100644 --- a/examples/simple_regional_beta/test_outputs.tf +++ b/examples/simple_regional_beta/test_outputs.tf @@ 
-21,10 +21,6 @@ output "project_id" { value = var.project_id } -output "credentials_path" { - value = var.credentials_path -} - output "region" { value = module.gke.region } diff --git a/examples/simple_regional_beta/variables.tf b/examples/simple_regional_beta/variables.tf index 1da408a790..ed16642774 100644 --- a/examples/simple_regional_beta/variables.tf +++ b/examples/simple_regional_beta/variables.tf @@ -18,10 +18,6 @@ variable "project_id" { description = "The project ID to host the cluster in" } -variable "credentials_path" { - description = "The path to the GCP credentials JSON file" -} - variable "cluster_name_suffix" { description = "A suffix to append to the default cluster name" default = "" @@ -60,3 +56,32 @@ variable "cloudrun" { description = "Boolean to enable / disable CloudRun" default = true } + +variable "node_metadata" { + description = "Specifies how node metadata is exposed to the workload running on the node" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false +} + +variable "remove_default_node_pool" { + type = bool + description = "Remove default node pool while setting up the cluster" + default = false +} + +variable "node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + + default = [ + { + name = "default-node-pool" + }, + ] +} diff --git a/test/fixtures/sandbox_enabled/example.tf b/test/fixtures/sandbox_enabled/example.tf new file mode 100644 index 0000000000..05b7edfd9e --- /dev/null +++ b/test/fixtures/sandbox_enabled/example.tf @@ -0,0 +1,40 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "example" { + source = "../../../examples/simple_regional_beta" + + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + compute_engine_service_account = var.compute_engine_service_account + istio = false + cloudrun = false + node_metadata = "UNSPECIFIED" + sandbox_enabled = true + remove_default_node_pool = true + + node_pools = [ + { + name = "default-node-pool" + image_type = "COS_CONTAINERD" + }, + ] +} diff --git a/test/fixtures/sandbox_enabled/network.tf b/test/fixtures/sandbox_enabled/network.tf new file mode 100644 index 0000000000..5d34d43748 --- /dev/null +++ b/test/fixtures/sandbox_enabled/network.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +provider "google" { + project = var.project_id +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} + diff --git a/test/fixtures/sandbox_enabled/outputs.tf b/test/fixtures/sandbox_enabled/outputs.tf new file mode 120000 index 0000000000..726bdc722f --- /dev/null +++ b/test/fixtures/sandbox_enabled/outputs.tf @@ -0,0 +1 @@ +../shared/outputs.tf \ No newline at end of file diff --git a/test/fixtures/sandbox_enabled/variables.tf b/test/fixtures/sandbox_enabled/variables.tf new file mode 120000 index 0000000000..c113c00a3d --- /dev/null +++ b/test/fixtures/sandbox_enabled/variables.tf @@ -0,0 +1 @@ +../shared/variables.tf \ No newline at end of file diff --git a/test/integration/sandbox_enabled/controls/gcloud.rb b/test/integration/sandbox_enabled/controls/gcloud.rb new file mode 100644 index 0000000000..eb0ffdaf46 --- /dev/null +++ b/test/integration/sandbox_enabled/controls/gcloud.rb @@ -0,0 +1,102 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project_id = attribute('project_id') +location = attribute('location') +cluster_name = attribute('cluster_name') + +control "gcloud" do + title "Google Compute Engine GKE configuration" + describe command("gcloud --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + + describe "cluster" do + it "is running" do + expect(data['status']).to eq 'RUNNING' + end + + it "is regional" do + expect(data['location']).to match(/^.*[1-9]$/) + end + + it "uses public nodes and master endpoint" do + expect(data['privateClusterConfig']).to eq nil + end + + it "has the expected addon settings" do + expect(data['addonsConfig']).to eq({ + "horizontalPodAutoscaling" => {}, + "httpLoadBalancing" => {}, + "kubernetesDashboard" => { + "disabled" => true, + }, + "networkPolicyConfig" => { + "disabled" => true, + }, + }) + end + end + + describe "node pool" do + let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } + + it "is the expected image type" do + expect(node_pools).to include( + including( + "config" => including( + "imageType" => "COS_CONTAINERD", + ), + ) + ) + end + + it "has the expected labels" do + expect(node_pools).to include( + including( + "config" => including( + "labels" => including( + "cluster_name" => cluster_name, + "node_pool" => "default-node-pool", + "sandbox.gke.io/runtime" => "gvisor", + ), + ), + ) + 
) + end + + it "has the expected network tags" do + expect(node_pools).to include( + including( + "config" => including( + "tags" => match_array([ + "gke-#{cluster_name}", + "gke-#{cluster_name}-default-node-pool", + ]), + ), + ) + ) + end + + end + end +end diff --git a/test/integration/sandbox_enabled/inspec.yml b/test/integration/sandbox_enabled/inspec.yml new file mode 100644 index 0000000000..0454937a36 --- /dev/null +++ b/test/integration/sandbox_enabled/inspec.yml @@ -0,0 +1,17 @@ +name: sandbox_enabled +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: client_token + required: true + type: string From 8a2fddc70bd5631195714a1f671ab4b9c2114392 Mon Sep 17 00:00:00 2001 From: omazin Date: Mon, 21 Oct 2019 15:52:33 +0300 Subject: [PATCH 53/82] [wait-for-cluster.sh] Fix #284. --- autogen/scripts/wait-for-cluster.sh | 2 +- .../scripts/wait-for-cluster.sh | 2 +- modules/beta-private-cluster/scripts/wait-for-cluster.sh | 2 +- modules/beta-public-cluster/scripts/wait-for-cluster.sh | 2 +- .../private-cluster-update-variant/scripts/wait-for-cluster.sh | 2 +- modules/private-cluster/scripts/wait-for-cluster.sh | 2 +- scripts/wait-for-cluster.sh | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/autogen/scripts/wait-for-cluster.sh b/autogen/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/autogen/scripts/wait-for-cluster.sh +++ b/autogen/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh 
b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 diff --git a/modules/beta-private-cluster/scripts/wait-for-cluster.sh b/modules/beta-private-cluster/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/modules/beta-private-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 diff --git a/modules/beta-public-cluster/scripts/wait-for-cluster.sh b/modules/beta-public-cluster/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/modules/beta-public-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-public-cluster/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 diff --git a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - 
CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 diff --git a/modules/private-cluster/scripts/wait-for-cluster.sh b/modules/private-cluster/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/modules/private-cluster/scripts/wait-for-cluster.sh +++ b/modules/private-cluster/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 37f0176ec7..7bf9919b8f 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -16,7 +16,7 @@ set -e if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} fi PROJECT=$1 From ebabcd0870f72d16dc48f25218731a19b93239e7 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Tue, 22 Oct 2019 21:54:38 -0500 Subject: [PATCH 54/82] add defaults, remove svc account, remove local --- examples/simple_regional_with_networking/README.md | 12 +++++------- examples/simple_regional_with_networking/main.tf | 9 ++------- .../simple_regional_with_networking/variables.tf | 14 +++++--------- 3 files changed, 12 insertions(+), 23 deletions(-) diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md index f39675063d..33a19683d0 100644 --- a/examples/simple_regional_with_networking/README.md +++ b/examples/simple_regional_with_networking/README.md @@ -7,14 +7,12 @@ This example illustrates how to create a VPC and a simple cluster. 
| Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | -| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | -| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | -| ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | -| network | The VPC network created to host the cluster in | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | `"ip-range-pods"` | no | +| ip\_range\_services | The secondary ip range to use for pods | string | `"ip-range-scv"` | no | +| network | The VPC network created to host the cluster in | string | `"gke-network"` | no | | project\_id | The project ID to host the cluster in | string | n/a | yes | -| region | The region to host the cluster in | string | n/a | yes | -| subnetwork | The subnetwork created to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | `"us-central1"` | no | +| subnetwork | The subnetwork created to host the cluster in | string | `"gke-subnet"` | no | ## Outputs diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf index ebb4ddb290..8bb70d6db9 100644 --- a/examples/simple_regional_with_networking/main.tf +++ b/examples/simple_regional_with_networking/main.tf @@ -14,10 +14,6 @@ * limitations under the License. 
*/ -locals { - cluster_type = "simple-regional" -} - provider "google" { version = "~> 2.12.0" region = var.region @@ -54,15 +50,14 @@ module "gcp-network" { module "gke" { source = "../../" project_id = var.project_id - name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + name = "simple-regional-cluster" regional = true region = var.region network = module.gcp-network.network_name subnetwork = module.gcp-network.subnets_names[0] ip_range_pods = var.ip_range_pods ip_range_services = var.ip_range_services - create_service_account = false - service_account = var.compute_engine_service_account + create_service_account = true } data "google_client_config" "default" { diff --git a/examples/simple_regional_with_networking/variables.tf b/examples/simple_regional_with_networking/variables.tf index 5c36a81c16..6896accbe4 100644 --- a/examples/simple_regional_with_networking/variables.tf +++ b/examples/simple_regional_with_networking/variables.tf @@ -18,32 +18,28 @@ variable "project_id" { description = "The project ID to host the cluster in" } -variable "cluster_name_suffix" { - description = "A suffix to append to the default cluster name" - default = "" -} - variable "region" { description = "The region to host the cluster in" + default = "us-central1" } variable "network" { description = "The VPC network created to host the cluster in" + default = "gke-network" } variable "subnetwork" { description = "The subnetwork created to host the cluster in" + default = "gke-subnet" } variable "ip_range_pods" { description = "The secondary ip range to use for pods" + default = "ip-range-pods" } variable "ip_range_services" { description = "The secondary ip range to use for pods" -} - -variable "compute_engine_service_account" { - description = "Service account to associate to the nodes in the cluster" + default = "ip-range-scv" } From 5a4f6ce7429e1e1f29f91f3c412fe4dc185456e7 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Tue, 22 Oct 2019 22:22:35 -0500 Subject: 
[PATCH 55/82] fix fixtures --- examples/simple_regional_with_networking/README.md | 1 + examples/simple_regional_with_networking/main.tf | 2 +- .../simple_regional_with_networking/variables.tf | 5 +++++ .../simple_regional_with_networking/example.tf | 12 +----------- .../simple_regional_with_networking/outputs.tf | 8 ++++---- .../simple_regional_with_networking/variables.tf | 10 ---------- 6 files changed, 12 insertions(+), 26 deletions(-) diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md index 33a19683d0..e24ee546fb 100644 --- a/examples/simple_regional_with_networking/README.md +++ b/examples/simple_regional_with_networking/README.md @@ -7,6 +7,7 @@ This example illustrates how to create a VPC and a simple cluster. | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | | ip\_range\_pods | The secondary ip range to use for pods | string | `"ip-range-pods"` | no | | ip\_range\_services | The secondary ip range to use for pods | string | `"ip-range-scv"` | no | | network | The VPC network created to host the cluster in | string | `"gke-network"` | no | diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf index 8bb70d6db9..fc53436f2d 100644 --- a/examples/simple_regional_with_networking/main.tf +++ b/examples/simple_regional_with_networking/main.tf @@ -50,7 +50,7 @@ module "gcp-network" { module "gke" { source = "../../" project_id = var.project_id - name = "simple-regional-cluster" + name = "simple-regional-cluster-${var.cluster_name_suffix}" regional = true region = var.region network = module.gcp-network.network_name diff --git a/examples/simple_regional_with_networking/variables.tf b/examples/simple_regional_with_networking/variables.tf index 6896accbe4..e9045a5c0d 100644 --- 
a/examples/simple_regional_with_networking/variables.tf +++ b/examples/simple_regional_with_networking/variables.tf @@ -18,6 +18,11 @@ variable "project_id" { description = "The project ID to host the cluster in" } +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + variable "region" { description = "The region to host the cluster in" default = "us-central1" diff --git a/test/fixtures/simple_regional_with_networking/example.tf b/test/fixtures/simple_regional_with_networking/example.tf index 23caab9b52..01506b034c 100644 --- a/test/fixtures/simple_regional_with_networking/example.tf +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -19,21 +19,11 @@ resource "random_string" "suffix" { special = false upper = false } -locals { - network = "gke-network-${random_string.suffix.result}" - subnetwork = "gke-subnetwork-${random_string.suffix.result}" - ip_range_pods = "gke-ip-range-pods-${random_string.suffix.result}" - ip_range_services = "gke-ip-range-svc-${random_string.suffix.result}" -} + module "example" { source = "../../../examples/simple_regional_with_networking" project_id = var.project_id cluster_name_suffix = "-${random_string.suffix.result}" region = var.region - network = local.network - subnetwork = local.subnetwork - ip_range_pods = local.ip_range_pods - ip_range_services = local.ip_range_services - compute_engine_service_account = var.compute_engine_service_account } diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf index ff11179f95..fd0f7651a6 100644 --- a/test/fixtures/simple_regional_with_networking/outputs.tf +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -49,12 +49,12 @@ output "service_account" { output "network_name" { description = "The name of the VPC being created" - value = local.network + value = var.network } output "subnet_name" { description = "The name of the 
subnet being created" - value = local.subnetwork + value = var.subnetwork } output "region" { @@ -64,10 +64,10 @@ output "region" { output "ip_range_pods_name" { description = "The secondary range name for pods" - value = local.ip_range_pods + value = var.ip_range_pods } output "ip_range_services_name" { description = "The secondary range name for services" - value = local.ip_range_services + value = var.ip_range_services } diff --git a/test/fixtures/simple_regional_with_networking/variables.tf b/test/fixtures/simple_regional_with_networking/variables.tf index 5b2187af58..b0ba1f508f 100644 --- a/test/fixtures/simple_regional_with_networking/variables.tf +++ b/test/fixtures/simple_regional_with_networking/variables.tf @@ -18,18 +18,8 @@ variable "project_id" { description = "The project ID to host the cluster in" } -variable "cluster_name_suffix" { - description = "A suffix to append to the default cluster name" - default = "" -} - variable "region" { description = "The region to host the cluster in" default = "us-east4" } - -variable "compute_engine_service_account" { - description = "Service account to associate to the nodes in the cluster" -} - From 258543e21e4e40bc82e568413599a0d143d784dd Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Tue, 22 Oct 2019 23:04:30 -0500 Subject: [PATCH 56/82] fix tests --- .../example.tf | 10 +++++++--- .../outputs.tf | 8 ++++---- .../variables.tf | 19 +++++++++++++++++++ 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/test/fixtures/simple_regional_with_networking/example.tf b/test/fixtures/simple_regional_with_networking/example.tf index 01506b034c..59f6c27ba2 100644 --- a/test/fixtures/simple_regional_with_networking/example.tf +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -23,7 +23,11 @@ resource "random_string" "suffix" { module "example" { source = "../../../examples/simple_regional_with_networking" - project_id = var.project_id - cluster_name_suffix = "-${random_string.suffix.result}" - 
region = var.region + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + network = "${var.network}-${random_string.suffix.result}" + subnetwork = "${var.subnetwork}-${random_string.suffix.result}" + ip_range_pods = "${var.ip_range_pods}-${random_string.suffix.result}" + ip_range_services = "${var.ip_range_services}-${random_string.suffix.result}" } diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf index fd0f7651a6..2a13237983 100644 --- a/test/fixtures/simple_regional_with_networking/outputs.tf +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -49,12 +49,12 @@ output "service_account" { output "network_name" { description = "The name of the VPC being created" - value = var.network + value = module.example.network } output "subnet_name" { description = "The name of the subnet being created" - value = var.subnetwork + value = module.example.subnetwork } output "region" { @@ -64,10 +64,10 @@ output "region" { output "ip_range_pods_name" { description = "The secondary range name for pods" - value = var.ip_range_pods + value = module.example.ip_range_pods } output "ip_range_services_name" { description = "The secondary range name for services" - value = var.ip_range_services + value = module.example.ip_range_services } diff --git a/test/fixtures/simple_regional_with_networking/variables.tf b/test/fixtures/simple_regional_with_networking/variables.tf index b0ba1f508f..989451fe32 100644 --- a/test/fixtures/simple_regional_with_networking/variables.tf +++ b/test/fixtures/simple_regional_with_networking/variables.tf @@ -23,3 +23,22 @@ variable "region" { default = "us-east4" } +variable "network" { + description = "The VPC network created to host the cluster in" + default = "gke-network" +} + +variable "subnetwork" { + description = "The subnetwork created to host the cluster in" + default = "gke-subnet" +} + +variable 
"ip_range_pods" { + description = "The secondary ip range to use for pods" + default = "ip-range-pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for pods" + default = "ip-range-scv" +} From b050668eecae7c3e8d2f48da1d014f53bf941fb7 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Wed, 23 Oct 2019 00:33:58 -0500 Subject: [PATCH 57/82] add ci yaml --- test/ci/simple-regional-with-networking.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 test/ci/simple-regional-with-networking.yml diff --git a/test/ci/simple-regional-with-networking.yml b/test/ci/simple-regional-with-networking.yml new file mode 100644 index 0000000000..022044bc8e --- /dev/null +++ b/test/ci/simple-regional-with-networking.yml @@ -0,0 +1,18 @@ +--- + +platform: linux + +inputs: +- name: pull-request + path: terraform-google-kubernetes-engine + +run: + path: make + args: ['test_integration'] + dir: terraform-google-kubernetes-engine + +params: + SUITE: "imple-regional-with-networking-local" + COMPUTE_ENGINE_SERVICE_ACCOUNT: "" + REGION: "us-east4" + ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' From 6ee11ce0e01b26667216fd553e0ed71ac4f787c6 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Wed, 23 Oct 2019 00:34:53 -0500 Subject: [PATCH 58/82] add ci yaml fix --- test/ci/simple-regional-with-networking.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ci/simple-regional-with-networking.yml b/test/ci/simple-regional-with-networking.yml index 022044bc8e..68ba8c38cd 100644 --- a/test/ci/simple-regional-with-networking.yml +++ b/test/ci/simple-regional-with-networking.yml @@ -12,7 +12,7 @@ run: dir: terraform-google-kubernetes-engine params: - SUITE: "imple-regional-with-networking-local" + SUITE: "simple-regional-with-networking-local" COMPUTE_ENGINE_SERVICE_ACCOUNT: "" REGION: "us-east4" ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' From 53ec7a99287ba00838a9811af8e723f6607fec98 Mon Sep 17 00:00:00 
2001 From: pp Date: Wed, 23 Oct 2019 10:49:12 +0300 Subject: [PATCH 59/82] Fix logic of skip_provisioners behavior --- README.md | 2 +- autogen/README.md | 4 +++- autogen/cluster.tf | 2 +- autogen/dns.tf | 2 +- autogen/variables.tf | 2 +- cluster.tf | 2 +- dns.tf | 2 +- .../beta-private-cluster-update-variant/README.md | 7 ++++++- .../beta-private-cluster-update-variant/cluster.tf | 1 + modules/beta-private-cluster-update-variant/dns.tf | 2 +- .../beta-private-cluster-update-variant/main.tf | 3 ++- .../networks.tf | 2 +- modules/beta-private-cluster-update-variant/sa.tf | 2 +- .../variables.tf | 14 +++++++++++++- modules/beta-private-cluster/README.md | 2 +- modules/beta-private-cluster/cluster.tf | 2 +- modules/beta-private-cluster/dns.tf | 2 +- modules/beta-private-cluster/variables.tf | 2 +- modules/beta-public-cluster/README.md | 2 +- modules/beta-public-cluster/cluster.tf | 2 +- modules/beta-public-cluster/dns.tf | 2 +- modules/beta-public-cluster/variables.tf | 2 +- modules/private-cluster-update-variant/README.md | 7 ++++++- modules/private-cluster-update-variant/cluster.tf | 1 + modules/private-cluster-update-variant/dns.tf | 2 +- modules/private-cluster-update-variant/main.tf | 3 ++- modules/private-cluster-update-variant/networks.tf | 2 +- modules/private-cluster-update-variant/sa.tf | 2 +- .../private-cluster-update-variant/variables.tf | 14 +++++++++++++- modules/private-cluster/README.md | 2 +- modules/private-cluster/cluster.tf | 2 +- modules/private-cluster/dns.tf | 2 +- modules/private-cluster/variables.tf | 2 +- scripts/wait-for-cluster.sh | 6 +----- variables.tf | 2 +- 35 files changed, 73 insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 8c9a63fdab..15f6aff13b 100644 --- a/README.md +++ b/README.md @@ -170,7 +170,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. 
If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | -| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/autogen/README.md b/autogen/README.md index dc0b63b003..a0655d62f6 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -195,12 +195,14 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. 
| list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. 
| bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/autogen/cluster.tf b/autogen/cluster.tf index c394945f2d..44d1f28639 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -433,7 +433,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 1 : 0 + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/autogen/dns.tf b/autogen/dns.tf index 731e010b0d..20c3b25ee9 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/autogen/variables.tf b/autogen/variables.tf index dded3fd6cb..ede2bac6c5 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -313,7 +313,7 @@ variable "cluster_resource_labels" { variable "skip_provisioners" { type = bool - description = "Flag to skip all local-exec provisioners. 
It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." default = false } {% if private_cluster %} diff --git a/cluster.tf b/cluster.tf index 40b5559703..58023049ac 100644 --- a/cluster.tf +++ b/cluster.tf @@ -227,7 +227,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 1 : 0 + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/dns.tf b/dns.tf index f490c15504..8a581ff68e 100644 --- a/dns.tf +++ b/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 0fc0068e96..ba84c19cce 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -188,12 +188,14 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. 
| bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | @@ -258,6 +260,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index cf1def945d..db3d13d413 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -401,6 +401,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 
0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-private-cluster-update-variant/dns.tf b/modules/beta-private-cluster-update-variant/dns.tf index b240a23e65..8a581ff68e 100644 --- a/modules/beta-private-cluster-update-variant/dns.tf +++ b/modules/beta-private-cluster-update-variant/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index fc38644871..63bf31ac78 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-private-cluster-update-variant/networks.tf b/modules/beta-private-cluster-update-variant/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-private-cluster-update-variant/networks.tf +++ b/modules/beta-private-cluster-update-variant/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-private-cluster-update-variant/sa.tf b/modules/beta-private-cluster-update-variant/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-private-cluster-update-variant/sa.tf +++ b/modules/beta-private-cluster-update-variant/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? 
var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 9a869a830f..2585cf4ba8 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -267,6 +268,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." @@ -302,6 +309,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." 
+ default = false +} variable "deploy_using_private_endpoint" { type = bool diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index f80dcae85b..d7415a8af6 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -195,7 +195,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | -| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. 
| bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index bb19940d1f..efd394a535 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -328,7 +328,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 1 : 0 + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index f490c15504..8a581ff68e 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index a5bcec8336..2585cf4ba8 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -311,7 +311,7 @@ variable "cluster_resource_labels" { variable "skip_provisioners" { type = bool - description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." default = false } diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 1d3bdc17b2..419c7d017f 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -186,7 +186,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | -| skip\_provisioners | Flag to skip all local-exec provisioners. 
It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index e2e46ac862..f3c342e2d4 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -323,7 +323,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 1 : 0 + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index f490c15504..8a581ff68e 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 32a8eaa33f..aec51dcf34 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -311,7 +311,7 @@ variable "cluster_resource_labels" { variable "skip_provisioners" { type = bool - description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." default = false } diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index e817361124..fa9cdb8852 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -174,10 +174,12 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. 
| bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | @@ -237,6 +239,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index e8db91a77a..c447c39a39 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -305,6 +305,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/private-cluster-update-variant/dns.tf b/modules/private-cluster-update-variant/dns.tf index b240a23e65..8a581ff68e 100644 --- a/modules/private-cluster-update-variant/dns.tf +++ b/modules/private-cluster-update-variant/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 
1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index bfe746401c..2bd1c40d14 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/private-cluster-update-variant/networks.tf b/modules/private-cluster-update-variant/networks.tf index a382073dc0..aae034eee5 100644 --- a/modules/private-cluster-update-variant/networks.tf +++ b/modules/private-cluster-update-variant/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/private-cluster-update-variant/sa.tf b/modules/private-cluster-update-variant/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/private-cluster-update-variant/sa.tf +++ b/modules/private-cluster-update-variant/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? 
var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 8008e08975..508a4f1b96 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -257,6 +258,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." @@ -292,6 +299,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." 
+ default = false +} variable "deploy_using_private_endpoint" { type = bool diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 5c01b849db..5465544b82 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -179,7 +179,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | -| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. 
| bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index ab8b275ab1..afb9a633ea 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -232,7 +232,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 1 : 0 + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index f490c15504..8a581ff68e 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) || var.skip_provisioners ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index d6381cb297..508a4f1b96 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -301,7 +301,7 @@ variable "cluster_resource_labels" { variable "skip_provisioners" { type = bool - description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." default = false } diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 37f0176ec7..6ff3253d58 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2019 Google LLC +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,10 +15,6 @@ set -e -if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} -fi - PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/variables.tf b/variables.tf index b7fbfba8cc..58cf1f4685 100644 --- a/variables.tf +++ b/variables.tf @@ -301,6 +301,6 @@ variable "cluster_resource_labels" { variable "skip_provisioners" { type = bool - description = "Flag to skip all local-exec provisioners. It breaks down `stub_domains` and `upstream_nameservers` variables functionality." + description = "Flag to skip all local-exec provisioners. 
It breaks `stub_domains` and `upstream_nameservers` variables functionality." default = false } From bf8a5ef5fed5bc26002970f50c31404195a327d0 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Tue, 22 Oct 2019 16:05:42 -0400 Subject: [PATCH 60/82] Parallelize execution of Kitchen instances Running all instances in serial is taking over 3 hours to complete. --- build/int.cloudbuild.yaml | 204 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 196 insertions(+), 8 deletions(-) diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index 85139efe7d..c1efa77057 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -21,18 +21,206 @@ steps: - 'TF_VAR_org_id=$_ORG_ID' - 'TF_VAR_folder_id=$_FOLDER_ID' - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' -- id: create +- id: create disable-client-cert-local + waitFor: + - prepare name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] -- id: converge + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create disable-client-cert-local'] +- id: converge disable-client-cert-local + waitFor: + - create disable-client-cert-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge'] -- id: verify + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge disable-client-cert-local'] +- id: verify disable-client-cert-local + waitFor: + - converge disable-client-cert-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify'] -- id: destroy + args: ['/bin/bash', '-c', 'source 
/usr/local/bin/task_helper_functions.sh && kitchen_do verify disable-client-cert-local'] +- id: destroy disable-client-cert-local + waitFor: + - verify disable-client-cert-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy'] + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy disable-client-cert-local'] +- id: create shared-vpc-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create shared-vpc-local'] +- id: converge shared-vpc-local + waitFor: + - create shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge shared-vpc-local'] +- id: verify shared-vpc-local + waitFor: + - converge shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify shared-vpc-local'] +- id: destroy shared-vpc-local + waitFor: + - verify shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy shared-vpc-local'] +- id: create simple-regional-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-regional-local'] +- id: converge simple-regional-local + 
waitFor: + - create simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-local'] +- id: verify simple-regional-local + waitFor: + - converge simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-local'] +- id: destroy simple-regional-local + waitFor: + - verify simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-local'] +- id: create simple-regional-private-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-regional-private-local'] +- id: converge simple-regional-private-local + waitFor: + - create simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-private-local'] +- id: verify simple-regional-private-local + waitFor: + - converge simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-private-local'] +- id: destroy simple-regional-private-local + waitFor: + - verify simple-regional-private-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-private-local'] +- id: create simple-zonal-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-zonal-local'] +- id: converge simple-zonal-local + waitFor: + - create simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local'] +- id: verify simple-zonal-local + waitFor: + - converge simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local'] +- id: destroy simple-zonal-local + waitFor: + - verify simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local'] +- id: create simple-zonal-private-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-zonal-private-local'] +- id: converge simple-zonal-private-local + waitFor: + - create simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && 
kitchen_do converge simple-zonal-private-local'] +- id: verify simple-zonal-private-local + waitFor: + - converge simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-private-local'] +- id: destroy simple-zonal-private-local + waitFor: + - verify simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-private-local'] +- id: create stub-domains-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create stub-domains-local'] +- id: converge stub-domains-local + waitFor: + - create stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-local'] +- id: verify stub-domains-local + waitFor: + - converge stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-local'] +- id: destroy stub-domains-local + waitFor: + - verify stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-local'] +- id: create upstream-nameservers-local + waitFor: + - prepare + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create upstream-nameservers-local'] +- id: converge upstream-nameservers-local + waitFor: + - create upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge upstream-nameservers-local'] +- id: verify upstream-nameservers-local + waitFor: + - converge upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify upstream-nameservers-local'] +- id: destroy upstream-nameservers-local + waitFor: + - verify upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy upstream-nameservers-local'] +- id: create stub-domains-upstream-nameservers-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create stub-domains-upstream-nameservers-local'] +- id: converge stub-domains-upstream-nameservers-local + waitFor: + - create stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-upstream-nameservers-local'] +- id: verify stub-domains-upstream-nameservers-local + waitFor: + - converge 
stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-upstream-nameservers-local'] +- id: destroy stub-domains-upstream-nameservers-local + waitFor: + - verify stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-upstream-nameservers-local'] +- id: create workload-metadata-config-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create workload-metadata-config-local'] +- id: converge workload-metadata-config-local + waitFor: + - create workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-metadata-config-local'] +- id: verify workload-metadata-config-local + waitFor: + - converge workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-metadata-config-local'] +- id: destroy workload-metadata-config-local + waitFor: + - verify workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] tags: - 'ci' - 'integration' 
From 3b83eb379e22b2718f12b8f184d4efdf8f63f83e Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Tue, 22 Oct 2019 16:06:32 -0400 Subject: [PATCH 61/82] Update Docker image to 0.4.3 --- Makefile | 2 +- build/int.cloudbuild.yaml | 2 +- build/lint.cloudbuild.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 5039822a75..b6471861d6 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ # Make will use bash instead of sh SHELL := /usr/bin/env bash -DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.1.0 +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.3 DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools REGISTRY_URL := gcr.io/cloud-foundation-cicd diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index c1efa77057..cc1dd0dcfe 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -226,4 +226,4 @@ tags: - 'integration' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.1.0' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.3' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml index 3b7306297c..d08fe185bd 100644 --- a/build/lint.cloudbuild.yaml +++ b/build/lint.cloudbuild.yaml @@ -24,4 +24,4 @@ tags: - 'lint' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.1.0' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.3' From c197597d17f8cf266d4a8ee8e24be7a9f609c14d Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Wed, 23 Oct 2019 10:14:29 -0400 Subject: [PATCH 62/82] Regenerate modules --- README.md | 26 +++++++++++++++++-- .../README.md | 3 +++ .../main.tf | 3 ++- .../networks.tf | 2 +- .../beta-private-cluster-update-variant/sa.tf | 2 +- .../variables.tf | 9 ++++++- modules/beta-private-cluster/README.md | 3 +-- modules/beta-public-cluster/README.md | 7 +++-- .../private-cluster-update-variant/README.md | 22 ++++++++++++++++ .../private-cluster-update-variant/main.tf | 3 ++- .../networks.tf 
| 2 +- modules/private-cluster-update-variant/sa.tf | 2 +- .../variables.tf | 9 ++++++- modules/private-cluster/README.md | 22 ++++++++++++++-- 14 files changed, 99 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index b269a1772f..281cfe7c73 100644 --- a/README.md +++ b/README.md @@ -129,46 +129,63 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | | basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | | cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | | cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | | configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. 
| object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | | initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | | ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | | ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. 
| string | `"60s"` | no | | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | | kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | | master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. 
VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. 
If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | +| region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | -| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -180,9 +197,12 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | |------|-------------| | ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | | kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | @@ -194,9 +214,11 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | | zones | List of zones in which the cluster resides | diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 0fc0068e96..17eabe56d2 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -258,6 +258,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index fc38644871..63bf31ac78 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-private-cluster-update-variant/networks.tf b/modules/beta-private-cluster-update-variant/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-private-cluster-update-variant/networks.tf +++ b/modules/beta-private-cluster-update-variant/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-private-cluster-update-variant/sa.tf b/modules/beta-private-cluster-update-variant/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-private-cluster-update-variant/sa.tf +++ b/modules/beta-private-cluster-update-variant/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? 
var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 9a869a830f..ee2d5b5556 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -267,6 +268,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 425ef67fa1..fc35c08c54 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -188,9 +188,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. 
| list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | +| region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | -| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 73cf4ea4c5..2b2ac1e15b 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -141,10 +141,13 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | | database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. 
| object | `` | no | | default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | | enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -162,6 +165,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | | master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | @@ -179,9 +183,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. 
| list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | +| region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | -| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index e817361124..72491fd134 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -134,26 +134,35 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | | basic\_auth\_password | The password to be used with Basic Authentication. 
| string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | | cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | | cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | | configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | | deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. 
This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | | initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | | ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | | ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! 
| bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | | kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | @@ -166,17 +175,22 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. 
| string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | | region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -188,9 +202,12 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | |------|-------------| | ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | | kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | @@ -202,9 +219,11 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | | zones | List of zones in which the cluster resides | @@ -237,6 +256,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index bfe746401c..2bd1c40d14 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/private-cluster-update-variant/networks.tf b/modules/private-cluster-update-variant/networks.tf index a382073dc0..aae034eee5 100644 --- a/modules/private-cluster-update-variant/networks.tf +++ b/modules/private-cluster-update-variant/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/private-cluster-update-variant/sa.tf b/modules/private-cluster-update-variant/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/private-cluster-update-variant/sa.tf +++ b/modules/private-cluster-update-variant/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? 
var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 8008e08975..a425c13d9a 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -257,6 +258,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index a9e780d24b..791ebbbd17 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -134,26 +134,35 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | | basic\_auth\_password | The password to be used with Basic Authentication. 
| string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | | cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | | cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | | configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | | deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. 
This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | | initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | | ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | | ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! 
| bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | | kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | @@ -166,18 +175,22 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. 
| string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | +| region | The region to host the cluster in (required) | string | n/a | yes | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | -| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -189,9 +202,12 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | |------|-------------| | ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | | kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | @@ -203,9 +219,11 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | | zones | List of zones in which the cluster resides | From 956e504f2d93ce7fa1d468737542ef58ccb84f3e Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Wed, 23 Oct 2019 10:23:15 -0400 Subject: [PATCH 63/82] Regenerate docs --- README.md | 26 ++----------------- .../README.md | 3 ++- modules/beta-private-cluster/README.md | 3 ++- modules/beta-public-cluster/README.md | 7 ++--- .../private-cluster-update-variant/README.md | 22 ++-------------- modules/private-cluster/README.md | 22 ++-------------- 6 files changed, 12 insertions(+), 71 deletions(-) diff --git a/README.md b/README.md index 281cfe7c73..b269a1772f 100644 --- a/README.md +++ b/README.md @@ -129,63 +129,46 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | | basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | -| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | | cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | | cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | | configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. 
IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | -| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | -| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | -| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | -| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | -| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | -| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | -| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | -| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. 
| bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | -| identity\_namespace | Workload Identity namespace | string | `""` | no | | initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | | ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | | ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | -| istio | (Beta) Enable Istio addon | string | `"false"` | no | | kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | | master\_authorized\_networks\_config | The desired configuration options for master authorized networks. 
The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | -| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | -| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) 
| bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | -| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | -| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -197,12 +180,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | |------|-------------| | ca\_certificate | Cluster ca certificate (base64 encoded) | -| cloudrun\_enabled | Whether CloudRun enabled | | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | -| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | -| istio\_enabled | Whether Istio is enabled | | kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | 
| logging\_service | Logging service used | @@ -214,11 +194,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | -| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | -| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | | zones | List of zones in which the cluster resides | diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 17eabe56d2..d6eee7e7f7 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -188,8 +188,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. 
If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index fc35c08c54..425ef67fa1 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -188,8 +188,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. 
If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 2b2ac1e15b..73cf4ea4c5 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -141,13 +141,10 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | | database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | | default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | -| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. 
| bool | `"true"` | no | | enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | -| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | -| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -165,7 +162,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | | master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | -| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. 
Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | @@ -183,8 +179,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. 
| string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 72491fd134..e0ef130d3e 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -134,35 +134,26 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | | basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | -| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | | cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | | cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | | configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | create\_service\_account | Defines if service account specified to run nodes should be created. 
| bool | `"true"` | no | -| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | -| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | | deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | -| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | -| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | -| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | -| identity\_namespace | Workload Identity namespace | string | `""` | no | | initial\_node\_count | The number of nodes to create in this cluster's default node pool. 
| number | `"0"` | no | | ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | | ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | -| istio | (Beta) Enable Istio addon | string | `"false"` | no | | kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | @@ -175,22 +166,18 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | -| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) 
| bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | -| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | -| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -202,12 +189,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | |------|-------------| | ca\_certificate | Cluster ca certificate (base64 encoded) | -| cloudrun\_enabled | Whether CloudRun enabled | | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | -| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | -| istio\_enabled | Whether Istio is enabled | | kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | 
| logging\_service | Logging service used | @@ -219,11 +203,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | -| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | -| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | | zones | List of zones in which the cluster resides | diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 791ebbbd17..a9e780d24b 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -134,35 +134,26 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | | basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | -| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | | cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. 
| string | `""` | no | | cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | | configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | -| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | -| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | | deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | -| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | -| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. 
This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | -| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | -| identity\_namespace | Workload Identity namespace | string | `""` | no | | initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | | ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | | ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! 
| bool | `"false"` | no | -| istio | (Beta) Enable Istio addon | string | `"false"` | no | | kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | @@ -175,22 +166,18 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | | node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | | node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. 
| string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | -| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | -| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | -| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | @@ -202,12 +189,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | Name | Description | |------|-------------| | ca\_certificate | Cluster ca certificate (base64 encoded) | -| cloudrun\_enabled | Whether CloudRun enabled | | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | -| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | -| istio\_enabled | Whether Istio is enabled | | kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | @@ -219,11 +203,9 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | -| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | -| vertical\_pod\_autoscaling\_enabled | Whether vertical pod autoscaling is enabled | | zones | List of zones in which the cluster resides | From 86eb36db82147f18003b7120efdc0b90a39719ab Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Wed, 23 Oct 2019 11:27:42 -0400 Subject: [PATCH 64/82] Update Docker image to 0.4.5 This version fixes `check_documentation` to ignore the autogen directory. 
--- Makefile | 2 +- build/int.cloudbuild.yaml | 2 +- build/lint.cloudbuild.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index b6471861d6..52c9deaf8f 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ # Make will use bash instead of sh SHELL := /usr/bin/env bash -DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.3 +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.5 DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools REGISTRY_URL := gcr.io/cloud-foundation-cicd diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index cc1dd0dcfe..379205db45 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -226,4 +226,4 @@ tags: - 'integration' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.3' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.5' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml index d08fe185bd..d5b2622e4f 100644 --- a/build/lint.cloudbuild.yaml +++ b/build/lint.cloudbuild.yaml @@ -24,4 +24,4 @@ tags: - 'lint' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.3' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.5' From 71449a9ba4c290c32bc14413cd3ee4bb569b2b3d Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Wed, 23 Oct 2019 11:28:07 -0400 Subject: [PATCH 65/82] Remove Inputs, Outputs tables from autogen README This content needs to be generated specifically for each module. It does not make sense to include it in the autogen template. 
--- autogen/README.md | 95 ----------------------------------------------- 1 file changed, 95 deletions(-) diff --git a/autogen/README.md b/autogen/README.md index dc0b63b003..300717fdd2 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -139,101 +139,6 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|:----:|:-----:|:-----:| -| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | -| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | -| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | -| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | -| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | -| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | -| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | -| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | -| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. 
Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | -| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | -| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | -| description | The description of the cluster | string | `""` | no | -| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | -| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | -| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | -| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | -| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | -| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | -| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | -| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | -| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | -| identity\_namespace | Workload Identity namespace | string | `""` | no | -| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | -| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). 
| bool | `"false"` | no | -| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | -| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | -| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | -| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | -| istio | (Beta) Enable Istio addon | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | -| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | -| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | -| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | -| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | -| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | -| monitoring\_service | The monitoring service that the cluster should write metrics to. 
Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | -| name | The name of the cluster (required) | string | n/a | yes | -| network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | bool | `"false"` | no | -| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | -| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | -| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | -| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | -| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | -| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | -| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | -| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no | -| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | -| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | -| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | -| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | -| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | -| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | -| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | -| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | -| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | -| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | -| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | -| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| ca\_certificate | Cluster ca certificate (base64 encoded) | -| cloudrun\_enabled | Whether CloudRun enabled | -| endpoint | Cluster endpoint | -| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | -| http\_load\_balancing\_enabled | Whether http load balancing enabled | -| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | -| istio\_enabled | Whether Istio is enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | -| location | Cluster location (region if regional cluster, zone if zonal cluster) | -| logging\_service | Logging service used | -| master\_authorized\_networks\_config | Networks from which access to master is permitted | -| master\_version | Current master kubernetes version | -| min\_master\_version | Minimum master kubernetes version | -| monitoring\_service | Monitoring service used | -| name | Cluster name | -| network\_policy\_enabled | Whether network policy enabled | -| node\_pools\_names | List of node pools names | -| node\_pools\_versions | List of node pools versions | -| pod\_security\_policy\_enabled | Whether pod security policy is enabled | -| region | Cluster region | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| -| type | Cluster type (regional / zonal) | -| vertical\_pod\_autoscaling\_enabled | Whether vertical pod autoscaling is enabled | -| zones | List of zones in which the cluster resides | From 947cd1b5f527bbb491ad6f5feb7d87a8caf2cee5 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Thu, 24 Oct 2019 10:31:52 +0800 Subject: [PATCH 66/82] Regen code and docs --- modules/beta-private-cluster-update-variant/README.md | 1 + scripts/wait-for-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index feb8d4584c..c2f9aeffd4 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -192,6 +192,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. 
| string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 42c9841fec..7bf9919b8f 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From cad1ad3c1f5b15dc2a3bce72a27bfbd96487f7ad Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Thu, 24 Oct 2019 12:21:57 +0800 Subject: [PATCH 67/82] Use provider version 2.18.0 --- test/setup/versions.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/setup/versions.tf b/test/setup/versions.tf index efbd8ea517..51f6a433b0 100644 --- a/test/setup/versions.tf +++ b/test/setup/versions.tf @@ -19,9 +19,9 @@ terraform { } provider "google" { - version = "~> 2.13.0" + version = "~> 2.18.0" } provider "google-beta" { - version = "~> 2.13.0" + version = "~> 2.18.0" } From a219515a20934f1e4df6789d9f903afd6dc92f0a Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Thu, 24 Oct 2019 01:46:46 -0500 Subject: [PATCH 68/82] fix svc account description --- examples/deploy_service/README.md | 2 +- examples/deploy_service/outputs.tf | 2 +- examples/disable_client_cert/README.md | 2 +- examples/disable_client_cert/outputs.tf | 2 +- examples/node_pool/README.md | 2 +- examples/node_pool/outputs.tf | 2 +- examples/shared_vpc/README.md | 2 +- examples/shared_vpc/outputs.tf | 2 +- examples/simple_regional/README.md | 2 +- examples/simple_regional/outputs.tf | 2 +- examples/simple_regional_beta/outputs.tf | 2 +- examples/simple_regional_private/README.md | 2 +- examples/simple_regional_private/outputs.tf | 2 +- examples/simple_regional_private_beta/outputs.tf | 2 +- 
examples/simple_zonal/README.md | 2 +- examples/simple_zonal/outputs.tf | 2 +- examples/simple_zonal_private/README.md | 2 +- examples/simple_zonal_private/outputs.tf | 2 +- examples/stub_domains/README.md | 2 +- examples/stub_domains/outputs.tf | 2 +- examples/stub_domains_private/README.md | 2 +- examples/stub_domains_private/outputs.tf | 2 +- examples/stub_domains_upstream_nameservers/outputs.tf | 2 +- examples/upstream_nameservers/outputs.tf | 2 +- examples/workload_metadata_config/outputs.tf | 2 +- 25 files changed, 25 insertions(+), 25 deletions(-) diff --git a/examples/deploy_service/README.md b/examples/deploy_service/README.md index 5dcb7ca7a7..e13981c450 100644 --- a/examples/deploy_service/README.md +++ b/examples/deploy_service/README.md @@ -37,7 +37,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/deploy_service/outputs.tf b/examples/deploy_service/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/deploy_service/outputs.tf +++ b/examples/deploy_service/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/disable_client_cert/README.md b/examples/disable_client_cert/README.md index 14dd6545c0..2f531b9906 100644 --- a/examples/disable_client_cert/README.md +++ b/examples/disable_client_cert/README.md @@ -36,7 +36,7 @@ This example illustrates how to create a simple cluster and disable deprecated s | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/disable_client_cert/outputs.tf b/examples/disable_client_cert/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/disable_client_cert/outputs.tf +++ b/examples/disable_client_cert/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/node_pool/README.md b/examples/node_pool/README.md index 9215f091cb..237b3f0b6f 100644 --- a/examples/node_pool/README.md +++ b/examples/node_pool/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a cluster with multiple custom node-pool | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/node_pool/outputs.tf b/examples/node_pool/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/node_pool/outputs.tf +++ b/examples/node_pool/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/shared_vpc/README.md b/examples/shared_vpc/README.md index 3b0f5a6157..964a346349 100644 --- a/examples/shared_vpc/README.md +++ b/examples/shared_vpc/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple cluster where the host network i | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/shared_vpc/outputs.tf b/examples/shared_vpc/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/shared_vpc/outputs.tf +++ b/examples/shared_vpc/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional/README.md b/examples/simple_regional/README.md index fb209e47b5..d1d986cd6e 100644 --- a/examples/simple_regional/README.md +++ b/examples/simple_regional/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple cluster. 
| network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional/outputs.tf b/examples/simple_regional/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional/outputs.tf +++ b/examples/simple_regional/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional_beta/outputs.tf b/examples/simple_regional_beta/outputs.tf index ad152e186c..0d770aa809 100644 --- a/examples/simple_regional_beta/outputs.tf +++ b/examples/simple_regional_beta/outputs.tf @@ -29,6 +29,6 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional_private/README.md b/examples/simple_regional_private/README.md index 8175482731..917c097951 100644 --- a/examples/simple_regional_private/README.md +++ b/examples/simple_regional_private/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple private cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional_private/outputs.tf b/examples/simple_regional_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional_private/outputs.tf +++ b/examples/simple_regional_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional_private_beta/outputs.tf b/examples/simple_regional_private_beta/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional_private_beta/outputs.tf +++ b/examples/simple_regional_private_beta/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_zonal/README.md b/examples/simple_zonal/README.md index 691f95c719..c086ea3a4b 100644 --- a/examples/simple_zonal/README.md +++ b/examples/simple_zonal/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_zonal/outputs.tf b/examples/simple_zonal/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_zonal/outputs.tf +++ b/examples/simple_zonal/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_zonal_private/README.md b/examples/simple_zonal_private/README.md index e576800d72..83cb7c575b 100644 --- a/examples/simple_zonal_private/README.md +++ b/examples/simple_zonal_private/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple private cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_zonal_private/outputs.tf b/examples/simple_zonal_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_zonal_private/outputs.tf +++ b/examples/simple_zonal_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/stub_domains/README.md b/examples/stub_domains/README.md index 126a1cd54c..bc4491b880 100644 --- a/examples/stub_domains/README.md +++ b/examples/stub_domains/README.md @@ -36,7 +36,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/stub_domains/outputs.tf b/examples/stub_domains/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains/outputs.tf +++ b/examples/stub_domains/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/stub_domains_private/README.md b/examples/stub_domains_private/README.md index ee4b89fa7f..205d5fdf76 100644 --- a/examples/stub_domains_private/README.md +++ b/examples/stub_domains_private/README.md @@ -38,7 +38,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/stub_domains_private/outputs.tf b/examples/stub_domains_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains_private/outputs.tf +++ b/examples/stub_domains_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." 
+ description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/stub_domains_upstream_nameservers/outputs.tf b/examples/stub_domains_upstream_nameservers/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains_upstream_nameservers/outputs.tf +++ b/examples/stub_domains_upstream_nameservers/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/upstream_nameservers/outputs.tf b/examples/upstream_nameservers/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/upstream_nameservers/outputs.tf +++ b/examples/upstream_nameservers/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/workload_metadata_config/outputs.tf b/examples/workload_metadata_config/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/workload_metadata_config/outputs.tf +++ b/examples/workload_metadata_config/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } From 98b0690c07883b513dd944b51abf6a7e8459caf6 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Thu, 24 Oct 2019 16:29:45 -0400 Subject: [PATCH 69/82] Add #258 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c78c043f18..d39fb93396 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Extending the adopted spec, each change should have a link to its corresponding ### Added +* Added ability to skip local-exec provisioners. [#258] * Added [private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) and [beta private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) variants which allow node pools to be created before being destroyed. [#256] * Add a parameter `registry_project_id` to allow connecting to registries in other projects. 
[#273] @@ -205,6 +206,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#258]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/258 [#273]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/273 [#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 [#256]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/256 From 2438e71a68f118026dd7bcd751a73e31feea30a6 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Thu, 24 Oct 2019 16:52:25 -0400 Subject: [PATCH 70/82] Add entry for 5.1.0 to CHANGELOG Add entries: - #282 - #284 - #285 --- CHANGELOG.md | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d39fb93396..5e609c0eca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,7 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] -### Changed - -* Made `region` variable optional for zonal clusters [#247] +## [v5.1.0] - 2019-10-24 ### Added @@ -18,6 +16,15 @@ Extending the adopted spec, each change should have a link to its corresponding * Added [private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) and [beta private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) variants which allow node pools to be created before being destroyed. [#256] * Add a parameter `registry_project_id` to allow connecting to registries in other projects. 
[#273] +### Changed + +* Made `region` variable optional for zonal clusters. [#247] +* Made default metadata, labels, and tags optional. [#282] + +### Fixed + +* Authenticate gcloud in wait-for-cluster.sh using value of `GOOGLE_APPLICATION_CREDENTIALS`. [#284] [#285] + ## [v5.0.0] - 2019-09-25 v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). @@ -191,7 +198,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o * Initial release of module. -[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...HEAD +[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.0...HEAD +[v5.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...v5.1.0 [v5.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...v5.0.0 [v4.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.0.0...v4.1.0 [v4.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v3.0.0...v4.0.0 @@ -206,11 +214,14 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 -[#258]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/258 +[#285]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/285 +[#284]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/284 +[#282]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/282 [#273]: 
https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/273 -[#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 +[#258]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/258 [#256]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/256 [#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 +[#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 [#228]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/228 [#238]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/238 [#241]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/241 From ed765c478b01d763b422a1e53a7ebec0034971ab Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Thu, 24 Oct 2019 17:51:57 -0500 Subject: [PATCH 71/82] minor nits, clean fixtures --- .../simple_regional_with_networking/README.md | 14 ++--- .../simple_regional_with_networking/main.tf | 10 ++-- .../outputs.tf | 14 ++++- .../test_outputs.tf | 59 ++++++++++++++++++- .../variables.tf | 10 ++-- .../example.tf | 9 +-- .../outputs.tf | 6 +- .../variables.tf | 20 ------- 8 files changed, 91 insertions(+), 51 deletions(-) mode change 120000 => 100644 examples/simple_regional_with_networking/test_outputs.tf diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md index e24ee546fb..2f9d844a61 100644 --- a/examples/simple_regional_with_networking/README.md +++ b/examples/simple_regional_with_networking/README.md @@ -7,9 +7,9 @@ This example illustrates how to create a VPC and a simple cluster. 
| Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | -| ip\_range\_pods | The secondary ip range to use for pods | string | `"ip-range-pods"` | no | -| ip\_range\_services | The secondary ip range to use for pods | string | `"ip-range-scv"` | no | +| cluster\_name | The name for the GKE cluster | string | `"gke-on-vpc-cluster"` | no | +| ip\_range\_pods\_name | The secondary ip range to use for pods | string | `"ip-range-pods"` | no | +| ip\_range\_services\_name | The secondary ip range to use for pods | string | `"ip-range-scv"` | no | | network | The VPC network created to host the cluster in | string | `"gke-network"` | no | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | `"us-central1"` | no | @@ -22,8 +22,8 @@ This example illustrates how to create a VPC and a simple cluster. | ca\_certificate | | | client\_token | | | cluster\_name | Cluster name | -| ip\_range\_pods | The secondary IP range used for pods | -| ip\_range\_services | The secondary IP range used for services | +| ip\_range\_pods\_name | The secondary IP range used for pods | +| ip\_range\_services\_name | The secondary IP range used for services | | kubernetes\_endpoint | | | location | | | master\_kubernetes\_version | The master Kubernetes version | @@ -31,8 +31,8 @@ This example illustrates how to create a VPC and a simple cluster. | network\_name | The name of the VPC being created | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | -| subnet\_names | The name of the subnet being created | +| service\_account | The default service account used for running nodes. 
| +| subnet\_name | The name of the subnet being created | | subnet\_secondary\_ranges | The secondary ranges associated with the subnet | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf index fc53436f2d..7789d73cdd 100644 --- a/examples/simple_regional_with_networking/main.tf +++ b/examples/simple_regional_with_networking/main.tf @@ -36,11 +36,11 @@ module "gcp-network" { secondary_ranges = { "${var.subnetwork}" = [ { - range_name = var.ip_range_pods + range_name = var.ip_range_pods_name ip_cidr_range = "192.168.0.0/18" }, { - range_name = var.ip_range_services + range_name = var.ip_range_services_name ip_cidr_range = "192.168.64.0/18" }, ] @@ -50,13 +50,13 @@ module "gcp-network" { module "gke" { source = "../../" project_id = var.project_id - name = "simple-regional-cluster-${var.cluster_name_suffix}" + name = var.cluster_name regional = true region = var.region network = module.gcp-network.network_name subnetwork = module.gcp-network.subnets_names[0] - ip_range_pods = var.ip_range_pods - ip_range_services = var.ip_range_services + ip_range_pods = var.ip_range_pods_name + ip_range_services = var.ip_range_services_name create_service_account = true } diff --git a/examples/simple_regional_with_networking/outputs.tf b/examples/simple_regional_with_networking/outputs.tf index 6cf2ab5eab..4bd320aae8 100644 --- a/examples/simple_regional_with_networking/outputs.tf +++ b/examples/simple_regional_with_networking/outputs.tf @@ -29,21 +29,29 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + output "network_name" { description = "The name of the VPC being created" value = module.gcp-network.network_name } -output "subnet_names" { +output "subnet_name" { description = "The name of the subnet being created" - value = module.gcp-network.subnets_names + value = module.gcp-network.subnets_name } output "subnet_secondary_ranges" { description = "The secondary ranges associated with the subnet" value = module.gcp-network.subnets_secondary_ranges } + + + diff --git a/examples/simple_regional_with_networking/test_outputs.tf b/examples/simple_regional_with_networking/test_outputs.tf deleted file mode 120000 index 17b34213ba..0000000000 --- a/examples/simple_regional_with_networking/test_outputs.tf +++ /dev/null @@ -1 +0,0 @@ -../../test/fixtures/all_examples/test_outputs.tf \ No newline at end of file diff --git a/examples/simple_regional_with_networking/test_outputs.tf b/examples/simple_regional_with_networking/test_outputs.tf new file mode 100644 index 0000000000..a703679105 --- /dev/null +++ b/examples/simple_regional_with_networking/test_outputs.tf @@ -0,0 +1,58 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods_name" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods_name +} + +output "ip_range_services_name" { + description = "The secondary IP range used for services" + value = var.ip_range_services_name +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/simple_regional_with_networking/variables.tf b/examples/simple_regional_with_networking/variables.tf index e9045a5c0d..8e9c0688de 100644 --- a/examples/simple_regional_with_networking/variables.tf +++ b/examples/simple_regional_with_networking/variables.tf @@ -18,9 +18,9 @@ variable "project_id" { description = "The project ID to host the cluster in" } -variable "cluster_name_suffix" { - description = "A suffix to append to the default cluster name" - default = "" +variable "cluster_name" { + description = "The name for the GKE cluster" + default = "gke-on-vpc-cluster" } variable "region" { @@ -38,12 +38,12 @@ variable "subnetwork" { default = "gke-subnet" } -variable "ip_range_pods" { +variable "ip_range_pods_name" { description = "The secondary ip range to use for pods" default = "ip-range-pods" } -variable "ip_range_services" { +variable "ip_range_services_name" { description = "The secondary ip range to use for pods" default = "ip-range-scv" } diff --git a/test/fixtures/simple_regional_with_networking/example.tf 
b/test/fixtures/simple_regional_with_networking/example.tf index 59f6c27ba2..ace90efb3d 100644 --- a/test/fixtures/simple_regional_with_networking/example.tf +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -23,11 +23,6 @@ resource "random_string" "suffix" { module "example" { source = "../../../examples/simple_regional_with_networking" - project_id = var.project_id - cluster_name_suffix = "-${random_string.suffix.result}" - region = var.region - network = "${var.network}-${random_string.suffix.result}" - subnetwork = "${var.subnetwork}-${random_string.suffix.result}" - ip_range_pods = "${var.ip_range_pods}-${random_string.suffix.result}" - ip_range_services = "${var.ip_range_services}-${random_string.suffix.result}" + project_id = var.project_id + region = var.region } diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf index 2a13237983..08f9a8a2e8 100644 --- a/test/fixtures/simple_regional_with_networking/outputs.tf +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -43,7 +43,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.example.service_account } @@ -64,10 +64,10 @@ output "region" { output "ip_range_pods_name" { description = "The secondary range name for pods" - value = module.example.ip_range_pods + value = module.example.ip_range_pods_name } output "ip_range_services_name" { description = "The secondary range name for services" - value = module.example.ip_range_services + value = module.example.ip_range_services_name } diff --git a/test/fixtures/simple_regional_with_networking/variables.tf b/test/fixtures/simple_regional_with_networking/variables.tf index 989451fe32..e9310a56c5 100644 --- a/test/fixtures/simple_regional_with_networking/variables.tf +++ b/test/fixtures/simple_regional_with_networking/variables.tf @@ -22,23 +22,3 @@ variable "region" { description = "The region to host the cluster in" default = "us-east4" } - -variable "network" { - description = "The VPC network created to host the cluster in" - default = "gke-network" -} - -variable "subnetwork" { - description = "The subnetwork created to host the cluster in" - default = "gke-subnet" -} - -variable "ip_range_pods" { - description = "The secondary ip range to use for pods" - default = "ip-range-pods" -} - -variable "ip_range_services" { - description = "The secondary ip range to use for pods" - default = "ip-range-scv" -} From 7e21e69017d01a2c48be84d33090f34323cb7bf4 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Thu, 24 Oct 2019 19:40:12 -0500 Subject: [PATCH 72/82] typo --- examples/simple_regional_with_networking/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/simple_regional_with_networking/outputs.tf b/examples/simple_regional_with_networking/outputs.tf index 4bd320aae8..a26f0275e0 100644 --- a/examples/simple_regional_with_networking/outputs.tf +++ b/examples/simple_regional_with_networking/outputs.tf @@ -45,7 +45,7 @@ output "network_name" { output "subnet_name" { description = "The name of the subnet being created" - value = 
module.gcp-network.subnets_name + value = module.gcp-network.subnets_names } output "subnet_secondary_ranges" { From ef865736b878a0d9a595645b09831858de7ce8b6 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Thu, 24 Oct 2019 01:46:46 -0500 Subject: [PATCH 73/82] fix svc account description --- examples/deploy_service/README.md | 2 +- examples/deploy_service/outputs.tf | 2 +- examples/disable_client_cert/README.md | 2 +- examples/disable_client_cert/outputs.tf | 2 +- examples/node_pool/README.md | 2 +- examples/node_pool/outputs.tf | 2 +- examples/shared_vpc/README.md | 2 +- examples/shared_vpc/outputs.tf | 2 +- examples/simple_regional/README.md | 2 +- examples/simple_regional/outputs.tf | 2 +- examples/simple_regional_beta/outputs.tf | 2 +- examples/simple_regional_private/README.md | 2 +- examples/simple_regional_private/outputs.tf | 2 +- examples/simple_regional_private_beta/outputs.tf | 2 +- examples/simple_zonal/README.md | 2 +- examples/simple_zonal/outputs.tf | 2 +- examples/simple_zonal_private/README.md | 2 +- examples/simple_zonal_private/outputs.tf | 2 +- examples/stub_domains/README.md | 2 +- examples/stub_domains/outputs.tf | 2 +- examples/stub_domains_private/README.md | 2 +- examples/stub_domains_private/outputs.tf | 2 +- examples/stub_domains_upstream_nameservers/outputs.tf | 2 +- examples/upstream_nameservers/outputs.tf | 2 +- examples/workload_metadata_config/outputs.tf | 2 +- 25 files changed, 25 insertions(+), 25 deletions(-) diff --git a/examples/deploy_service/README.md b/examples/deploy_service/README.md index 5dcb7ca7a7..e13981c450 100644 --- a/examples/deploy_service/README.md +++ b/examples/deploy_service/README.md @@ -37,7 +37,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/deploy_service/outputs.tf b/examples/deploy_service/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/deploy_service/outputs.tf +++ b/examples/deploy_service/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/disable_client_cert/README.md b/examples/disable_client_cert/README.md index 14dd6545c0..2f531b9906 100644 --- a/examples/disable_client_cert/README.md +++ b/examples/disable_client_cert/README.md @@ -36,7 +36,7 @@ This example illustrates how to create a simple cluster and disable deprecated s | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/disable_client_cert/outputs.tf b/examples/disable_client_cert/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/disable_client_cert/outputs.tf +++ b/examples/disable_client_cert/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/node_pool/README.md b/examples/node_pool/README.md index 9215f091cb..237b3f0b6f 100644 --- a/examples/node_pool/README.md +++ b/examples/node_pool/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a cluster with multiple custom node-pool | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/node_pool/outputs.tf b/examples/node_pool/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/node_pool/outputs.tf +++ b/examples/node_pool/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/shared_vpc/README.md b/examples/shared_vpc/README.md index 3b0f5a6157..964a346349 100644 --- a/examples/shared_vpc/README.md +++ b/examples/shared_vpc/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple cluster where the host network i | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/shared_vpc/outputs.tf b/examples/shared_vpc/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/shared_vpc/outputs.tf +++ b/examples/shared_vpc/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional/README.md b/examples/simple_regional/README.md index 1f0a187b6b..4950b21e9a 100644 --- a/examples/simple_regional/README.md +++ b/examples/simple_regional/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional/outputs.tf b/examples/simple_regional/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional/outputs.tf +++ b/examples/simple_regional/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/simple_regional_beta/outputs.tf b/examples/simple_regional_beta/outputs.tf index ad152e186c..0d770aa809 100644 --- a/examples/simple_regional_beta/outputs.tf +++ b/examples/simple_regional_beta/outputs.tf @@ -29,6 +29,6 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional_private/README.md b/examples/simple_regional_private/README.md index 8175482731..917c097951 100644 --- a/examples/simple_regional_private/README.md +++ b/examples/simple_regional_private/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple private cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional_private/outputs.tf b/examples/simple_regional_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional_private/outputs.tf +++ b/examples/simple_regional_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/simple_regional_private_beta/outputs.tf b/examples/simple_regional_private_beta/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional_private_beta/outputs.tf +++ b/examples/simple_regional_private_beta/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_zonal/README.md b/examples/simple_zonal/README.md index 691f95c719..c086ea3a4b 100644 --- a/examples/simple_zonal/README.md +++ b/examples/simple_zonal/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_zonal/outputs.tf b/examples/simple_zonal/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_zonal/outputs.tf +++ b/examples/simple_zonal/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_zonal_private/README.md b/examples/simple_zonal_private/README.md index e576800d72..83cb7c575b 100644 --- a/examples/simple_zonal_private/README.md +++ b/examples/simple_zonal_private/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple private cluster. 
| network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_zonal_private/outputs.tf b/examples/simple_zonal_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_zonal_private/outputs.tf +++ b/examples/simple_zonal_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/stub_domains/README.md b/examples/stub_domains/README.md index 126a1cd54c..bc4491b880 100644 --- a/examples/stub_domains/README.md +++ b/examples/stub_domains/README.md @@ -36,7 +36,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/stub_domains/outputs.tf b/examples/stub_domains/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains/outputs.tf +++ b/examples/stub_domains/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/stub_domains_private/README.md b/examples/stub_domains_private/README.md index ee4b89fa7f..205d5fdf76 100644 --- a/examples/stub_domains_private/README.md +++ b/examples/stub_domains_private/README.md @@ -38,7 +38,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/stub_domains_private/outputs.tf b/examples/stub_domains_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains_private/outputs.tf +++ b/examples/stub_domains_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/stub_domains_upstream_nameservers/outputs.tf b/examples/stub_domains_upstream_nameservers/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains_upstream_nameservers/outputs.tf +++ b/examples/stub_domains_upstream_nameservers/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/upstream_nameservers/outputs.tf b/examples/upstream_nameservers/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/upstream_nameservers/outputs.tf +++ b/examples/upstream_nameservers/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/workload_metadata_config/outputs.tf b/examples/workload_metadata_config/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/workload_metadata_config/outputs.tf +++ b/examples/workload_metadata_config/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } From 934fd12c252f2ea72587b116ec22de56a4f586e1 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Fri, 25 Oct 2019 11:42:19 +0800 Subject: [PATCH 74/82] Fixes to ensure tests can be ran locally --- examples/node_pool/main.tf | 2 +- examples/node_pool_update_variant_beta/main.tf | 2 +- examples/simple_regional_beta/main.tf | 3 +-- examples/simple_regional_private_beta/main.tf | 3 +-- examples/workload_metadata_config/main.tf | 2 +- test/setup/main.tf | 2 ++ 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 6662bb84ac..c7a7f852ae 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -19,7 +19,7 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" region = var.region } diff --git a/examples/node_pool_update_variant_beta/main.tf b/examples/node_pool_update_variant_beta/main.tf index 373fd59f30..37b595f793 100644 --- a/examples/node_pool_update_variant_beta/main.tf +++ b/examples/node_pool_update_variant_beta/main.tf @@ -19,7 +19,7 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" credentials = file(var.credentials_path) region = var.region } diff --git a/examples/simple_regional_beta/main.tf b/examples/simple_regional_beta/main.tf index fc95090ede..5eea5e23c7 100644 --- a/examples/simple_regional_beta/main.tf +++ b/examples/simple_regional_beta/main.tf @@ -19,7 +19,7 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" credentials = file(var.credentials_path) region = var.region } @@ -42,4 +42,3 @@ module "gke" { data "google_client_config" "default" { } - diff --git a/examples/simple_regional_private_beta/main.tf b/examples/simple_regional_private_beta/main.tf index 0ca1873d86..db6c8a8204 100644 --- a/examples/simple_regional_private_beta/main.tf +++ b/examples/simple_regional_private_beta/main.tf @@ -19,7 +19,7 @@ locals { } provider 
"google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" credentials = file(var.credentials_path) region = var.region } @@ -62,4 +62,3 @@ module "gke" { data "google_client_config" "default" { } - diff --git a/examples/workload_metadata_config/main.tf b/examples/workload_metadata_config/main.tf index f9fb25da5b..3d2254c2da 100644 --- a/examples/workload_metadata_config/main.tf +++ b/examples/workload_metadata_config/main.tf @@ -19,7 +19,7 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" region = var.region } diff --git a/test/setup/main.tf b/test/setup/main.tf index f974c7408e..70e10c46a3 100644 --- a/test/setup/main.tf +++ b/test/setup/main.tf @@ -24,6 +24,8 @@ module "gke-project" { folder_id = var.folder_id billing_account = var.billing_account + auto_create_network = true + activate_apis = [ "bigquery-json.googleapis.com", "cloudkms.googleapis.com", From 8d8451c7112f483a2fc40424f48820688d7c289b Mon Sep 17 00:00:00 2001 From: pp Date: Fri, 25 Oct 2019 11:29:49 +0300 Subject: [PATCH 75/82] Added sandbox-enabled-local to int tests on CI * Fixed lint --- build/int.cloudbuild.yaml | 20 ++++++++++++++++++++ examples/simple_regional_beta/README.md | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index 379205db45..a4ce07ab4a 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -221,6 +221,26 @@ steps: - verify workload-metadata-config-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] +- id: create sandbox-enabled-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create 
sandbox-enabled-local'] +- id: converge sandbox-enabled-local + waitFor: + - create sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge sandbox-enabled-local'] +- id: verify sandbox-enabled-local + waitFor: + - converge sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify sandbox-enabled-local'] +- id: destroy sandbox-enabled-local + waitFor: + - verify sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy sandbox-enabled-local'] tags: - 'ci' - 'integration' diff --git a/examples/simple_regional_beta/README.md b/examples/simple_regional_beta/README.md index 02d0dba224..72bb221d9f 100644 --- a/examples/simple_regional_beta/README.md +++ b/examples/simple_regional_beta/README.md @@ -37,7 +37,7 @@ This example illustrates how to create a simple cluster with beta features. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | From 40a08a085ec0503994189c5e21c41f9d1589208c Mon Sep 17 00:00:00 2001 From: pp Date: Mon, 21 Oct 2019 12:36:06 +0300 Subject: [PATCH 76/82] Updated dev tools docker image tag to 0.4.6 * Fixed lint tests * Updated Makefile * Fixed check_generate test function --- Makefile | 16 ++++++++-------- autogen/scripts/wait-for-cluster.sh | 3 ++- build/int.cloudbuild.yaml | 2 +- build/lint.cloudbuild.yaml | 2 +- .../scripts/wait-for-cluster.sh | 3 ++- .../scripts/wait-for-cluster.sh | 3 ++- .../scripts/wait-for-cluster.sh | 3 ++- .../scripts/wait-for-cluster.sh | 3 ++- .../private-cluster/scripts/wait-for-cluster.sh | 3 ++- scripts/wait-for-cluster.sh | 5 +++-- test/task_helper_functions.sh | 16 ++-------------- 11 files changed, 27 insertions(+), 32 deletions(-) diff --git a/Makefile b/Makefile index 52c9deaf8f..736cad34ce 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ # Make will use bash instead of sh SHELL := /usr/bin/env bash -DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.5 +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.6 DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools REGISTRY_URL := gcr.io/cloud-foundation-cicd @@ -27,7 +27,7 @@ REGISTRY_URL := gcr.io/cloud-foundation-cicd docker_run: docker run --rm -it \ -e SERVICE_ACCOUNT_JSON \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /bin/bash @@ -39,7 +39,7 @@ docker_test_prepare: -e TF_VAR_org_id \ -e TF_VAR_folder_id \ -e TF_VAR_billing_account \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /usr/local/bin/execute_with_credentials.sh prepare_environment @@ -51,7 +51,7 @@ docker_test_cleanup: -e TF_VAR_org_id \ -e TF_VAR_folder_id \ -e TF_VAR_billing_account \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ 
$(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /usr/local/bin/execute_with_credentials.sh cleanup_environment @@ -60,7 +60,7 @@ docker_test_cleanup: docker_test_integration: docker run --rm -it \ -e SERVICE_ACCOUNT_JSON \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /usr/local/bin/test_integration.sh @@ -68,7 +68,7 @@ docker_test_integration: .PHONY: docker_test_lint docker_test_lint: docker run --rm -it \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /usr/local/bin/test_lint.sh @@ -76,7 +76,7 @@ docker_test_lint: .PHONY: docker_generate_docs docker_generate_docs: docker run --rm -it \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs' @@ -84,7 +84,7 @@ docker_generate_docs: .PHONY: docker_generate docker_generate: docker run --rm -it \ - -v $(CURDIR):/workspace \ + -v "$(CURDIR)":/workspace \ $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate' diff --git a/autogen/scripts/wait-for-cluster.sh b/autogen/scripts/wait-for-cluster.sh index 7bf9919b8f..b7019eace1 100755 --- a/autogen/scripts/wait-for-cluster.sh +++ b/autogen/scripts/wait-for-cluster.sh @@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index 379205db45..4d77c4b1f1 100644 --- 
a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -226,4 +226,4 @@ tags: - 'integration' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.5' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.6' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml index d5b2622e4f..7ba0827bdb 100644 --- a/build/lint.cloudbuild.yaml +++ b/build/lint.cloudbuild.yaml @@ -24,4 +24,4 @@ tags: - 'lint' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.5' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.6' diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh index 7bf9919b8f..b7019eace1 100755 --- a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/modules/beta-private-cluster/scripts/wait-for-cluster.sh b/modules/beta-private-cluster/scripts/wait-for-cluster.sh index 7bf9919b8f..b7019eace1 100755 --- a/modules/beta-private-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster/scripts/wait-for-cluster.sh @@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/modules/beta-public-cluster/scripts/wait-for-cluster.sh b/modules/beta-public-cluster/scripts/wait-for-cluster.sh index 7bf9919b8f..b7019eace1 
100755 --- a/modules/beta-public-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-public-cluster/scripts/wait-for-cluster.sh @@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh index 7bf9919b8f..b7019eace1 100755 --- a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/modules/private-cluster/scripts/wait-for-cluster.sh b/modules/private-cluster/scripts/wait-for-cluster.sh index 7bf9919b8f..b7019eace1 100755 --- a/modules/private-cluster/scripts/wait-for-cluster.sh +++ b/modules/private-cluster/scripts/wait-for-cluster.sh @@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 42c9841fec..b7019eace1 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,8 +15,9 @@ set -e +# shellcheck disable=SC2034 if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" fi PROJECT=$1 diff --git a/test/task_helper_functions.sh b/test/task_helper_functions.sh index 70ab3db5c8..7de28bbdfe 100755 --- a/test/task_helper_functions.sh +++ b/test/task_helper_functions.sh @@ -32,8 +32,9 @@ function check_generate() { --exclude '*/.kitchen' \ --exclude '*/.git' \ /workspace "${tempdir}" >/dev/null 2>/dev/null - cd "${tempdir}" || exit 1 + cd "${tempdir}/workspace" || exit 1 generate >/dev/null 2>/dev/null + generate_docs >/dev/null 2>/dev/null diff -r \ --exclude=".terraform" \ --exclude=".kitchen" \ @@ -49,16 +50,3 @@ function check_generate() { rm -Rf "${tempdir}" return $((rval)) } - -find_files() { - local pth="$1" - shift - find "${pth}" '(' \ - -path '*/.git' -o \ - -path '*/.terraform' -o \ - -path '*/.kitchen' -o \ - -path './autogen' -o \ - -path './test/fixtures/all_examples' -o \ - -path './test/fixtures/shared' ')' \ - -prune -o -type f "$@" -} From 3933a794ddb7ca91f6fde1ea9d696b470c7e37c0 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Fri, 25 Oct 2019 11:44:44 -0400 Subject: [PATCH 77/82] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e609c0eca..efdebd5e9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,10 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +### Fixed + +* Fixed bug with setting up sandboxing on nodes. 
[#286] + ## [v5.1.0] - 2019-10-24 ### Added @@ -214,6 +218,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#286]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/286 [#285]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/285 [#284]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/284 [#282]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/282 From ab02f24887a8e8adffbea589f732b5a15d34af93 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Fri, 25 Oct 2019 14:07:19 -0400 Subject: [PATCH 78/82] Add 5.1.1 to CHANGELOG --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efdebd5e9e..cf6ef22df2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +## [v5.1.1] - 2019-10-25 + ### Fixed * Fixed bug with setting up sandboxing on nodes. [#286] @@ -202,7 +204,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o * Initial release of module. 
-[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.0...HEAD +[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.1...HEAD +[v5.1.1]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.0...v5.1.1 [v5.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...v5.1.0 [v5.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...v5.0.0 [v4.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.0.0...v4.1.0 From 34197c263e23f18096e931aad133d5aaad734020 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Sat, 26 Oct 2019 13:16:17 -0500 Subject: [PATCH 79/82] add desc, new ci steps, unused resources --- build/int.cloudbuild.yaml | 20 +++++++++++++++++++ .../simple_regional_with_networking/README.md | 6 +++--- .../simple_regional_with_networking/main.tf | 5 ----- .../outputs.tf | 13 +++++++----- .../example.tf | 6 ------ 5 files changed, 31 insertions(+), 19 deletions(-) diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index a4ce07ab4a..8bf89efe09 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -101,6 +101,26 @@ steps: - verify simple-regional-private-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-private-local'] +- id: create simple-regional-with-networking-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-regional-with-networking-local'] +- id: converge simple-regional-with-networking-local + 
waitFor: + - create simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-networking-local'] +- id: verify simple-regional-with-networking-local + waitFor: + - converge simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-networking-local'] +- id: destroy simple-regional-with-networking-local + waitFor: + - verify simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-networking-local'] - id: create simple-zonal-local waitFor: - prepare diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md index 2f9d844a61..8ef0dad0ee 100644 --- a/examples/simple_regional_with_networking/README.md +++ b/examples/simple_regional_with_networking/README.md @@ -19,12 +19,12 @@ This example illustrates how to create a VPC and a simple cluster. 
| Name | Description | |------|-------------| -| ca\_certificate | | -| client\_token | | +| ca\_certificate | The cluster ca certificate (base64 encoded) | +| client\_token | The bearer token for auth | | cluster\_name | Cluster name | | ip\_range\_pods\_name | The secondary IP range used for pods | | ip\_range\_services\_name | The secondary IP range used for services | -| kubernetes\_endpoint | | +| kubernetes\_endpoint | The cluster endpoint | | location | | | master\_kubernetes\_version | The master Kubernetes version | | network | | diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf index 7789d73cdd..7b39615114 100644 --- a/examples/simple_regional_with_networking/main.tf +++ b/examples/simple_regional_with_networking/main.tf @@ -14,11 +14,6 @@ * limitations under the License. */ -provider "google" { - version = "~> 2.12.0" - region = var.region -} - module "gcp-network" { source = "terraform-google-modules/network/google" version = "~> 1.4.0" diff --git a/examples/simple_regional_with_networking/outputs.tf b/examples/simple_regional_with_networking/outputs.tf index a26f0275e0..bb255b54a2 100644 --- a/examples/simple_regional_with_networking/outputs.tf +++ b/examples/simple_regional_with_networking/outputs.tf @@ -15,17 +15,20 @@ */ output "kubernetes_endpoint" { - sensitive = true - value = module.gke.endpoint + description = "The cluster endpoint" + sensitive = true + value = module.gke.endpoint } output "client_token" { - sensitive = true - value = base64encode(data.google_client_config.default.access_token) + description = "The bearer token for auth" + sensitive = true + value = base64encode(data.google_client_config.default.access_token) } output "ca_certificate" { - value = module.gke.ca_certificate + description = "The cluster ca certificate (base64 encoded)" + value = module.gke.ca_certificate } output "service_account" { diff --git a/test/fixtures/simple_regional_with_networking/example.tf 
b/test/fixtures/simple_regional_with_networking/example.tf index ace90efb3d..c7ae5af76c 100644 --- a/test/fixtures/simple_regional_with_networking/example.tf +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -14,12 +14,6 @@ * limitations under the License. */ -resource "random_string" "suffix" { - length = 4 - special = false - upper = false -} - module "example" { source = "../../../examples/simple_regional_with_networking" From 0fd2555cfee00e8daf87d1903d09b5b5f0b62f53 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Tue, 29 Oct 2019 07:15:42 +0800 Subject: [PATCH 80/82] fix example --- examples/simple_regional_beta/main.tf | 5 ++--- scripts/wait-for-cluster.sh | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/simple_regional_beta/main.tf b/examples/simple_regional_beta/main.tf index c2e29b1298..0863cc51de 100644 --- a/examples/simple_regional_beta/main.tf +++ b/examples/simple_regional_beta/main.tf @@ -19,9 +19,8 @@ locals { } provider "google-beta" { - version = "~> 2.18.0" - credentials = file(var.credentials_path) - region = var.region + version = "~> 2.18.0" + region = var.region } module "gke" { diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 42c9841fec..7bf9919b8f 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From e45d9125c505b27441ad9f974fd80f73ec3827c3 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Tue, 29 Oct 2019 12:14:03 +0800 Subject: [PATCH 81/82] Remove enable_release_channel --- autogen/cluster.tf | 8 ++------ autogen/main.tf | 4 ---- autogen/outputs.tf | 5 ----- autogen/variables.tf | 6 ------ main.tf | 1 - modules/beta-private-cluster-update-variant/README.md | 2 -- modules/beta-private-cluster-update-variant/cluster.tf | 8 ++------ modules/beta-private-cluster-update-variant/main.tf | 2 -- modules/beta-private-cluster-update-variant/outputs.tf | 5 ----- modules/beta-private-cluster-update-variant/variables.tf | 6 ------ modules/beta-private-cluster/README.md | 2 -- modules/beta-private-cluster/cluster.tf | 8 ++------ modules/beta-private-cluster/main.tf | 2 -- modules/beta-private-cluster/outputs.tf | 5 ----- modules/beta-private-cluster/variables.tf | 6 ------ modules/beta-public-cluster/README.md | 2 -- modules/beta-public-cluster/cluster.tf | 8 ++------ modules/beta-public-cluster/main.tf | 2 -- modules/beta-public-cluster/outputs.tf | 5 ----- modules/beta-public-cluster/variables.tf | 6 ------ modules/private-cluster-update-variant/main.tf | 1 - modules/private-cluster/main.tf | 1 - 22 files changed, 8 insertions(+), 87 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 23d9a6b5ff..0742fd1523 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -46,12 +46,8 @@ resource "google_container_cluster" "primary" { } {% if beta_cluster %} - dynamic "release_channel" { - for_each = local.release_channel - - content { - channel = release_channel.value.channel - } + release_channel { + channel = var.release_channel } {% endif %} diff --git a/autogen/main.tf b/autogen/main.tf index 9ed92d9741..afbd7bf8c1 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -48,10 +48,6 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? 
local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal -{% if beta_cluster %} - release_channel = var.enable_release_channel ? [{ channel : var.release_channel }] : [] -{% endif %} - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/autogen/outputs.tf b/autogen/outputs.tf index 2f3e73fbb0..704569d00e 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -150,11 +150,6 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } -output "release_channel_enabled" { - description = "Whether release channel is enabled" - value = var.enable_release_channel -} - output "release_channel" { description = "The release channel of this cluster" value = var.release_channel diff --git a/autogen/variables.tf b/autogen/variables.tf index f24605d906..cbc9a9b4d6 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -422,12 +422,6 @@ variable "authenticator_security_group" { default = null } -variable "enable_release_channel" { - type = bool - description = "(Beta) Whether release channel is configured for this cluster." - default = false -} - variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." diff --git a/main.tf b/main.tf index 1090227fd8..a9e1c15810 100644 --- a/main.tf +++ b/main.tf @@ -45,7 +45,6 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? 
var.network_project_id : var.project_id diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index bbe27e3964..fda632e3c4 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -153,7 +153,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | -| enable\_release\_channel | (Beta) Whether release channel is configured for this cluster. | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -228,7 +227,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | release\_channel | The release channel of this cluster | -| release\_channel\_enabled | Whether release channel is enabled | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 366280d7b8..8624bdd4ec 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -41,12 +41,8 @@ resource "google_container_cluster" "primary" { } } - dynamic "release_channel" { - for_each = local.release_channel - - content { - channel = release_channel.value.channel - } + release_channel { + channel = var.release_channel } subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index 760209d6df..63bf31ac78 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -44,8 +44,6 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal - release_channel = var.enable_release_channel ? 
[{ channel : var.release_channel }] : [] - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-private-cluster-update-variant/outputs.tf b/modules/beta-private-cluster-update-variant/outputs.tf index 31a22b2175..956c8c2d5d 100644 --- a/modules/beta-private-cluster-update-variant/outputs.tf +++ b/modules/beta-private-cluster-update-variant/outputs.tf @@ -149,11 +149,6 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } -output "release_channel_enabled" { - description = "Whether release channel is enabled" - value = var.enable_release_channel -} - output "release_channel" { description = "The release channel of this cluster" value = var.release_channel diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 8df35ee512..4d403b2b30 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -417,12 +417,6 @@ variable "authenticator_security_group" { default = null } -variable "enable_release_channel" { - type = bool - description = "(Beta) Whether release channel is configured for this cluster." - default = false -} - variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 4d04e587e7..f56c6957ed 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -153,7 +153,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. 
This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | -| enable\_release\_channel | (Beta) Whether release channel is configured for this cluster. | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -228,7 +227,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | release\_channel | The release channel of this cluster | -| release\_channel\_enabled | Whether release channel is enabled | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index d0cc6d7c20..2c29d8b81f 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -41,12 +41,8 @@ resource "google_container_cluster" "primary" { } } - dynamic "release_channel" { - for_each = local.release_channel - - content { - channel = release_channel.value.channel - } + release_channel { + channel = var.release_channel } subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 760209d6df..63bf31ac78 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -44,8 +44,6 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal - release_channel = var.enable_release_channel ? 
[{ channel : var.release_channel }] : [] - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index 31a22b2175..956c8c2d5d 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -149,11 +149,6 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } -output "release_channel_enabled" { - description = "Whether release channel is enabled" - value = var.enable_release_channel -} - output "release_channel" { description = "The release channel of this cluster" value = var.release_channel diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 8df35ee512..4d403b2b30 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -417,12 +417,6 @@ variable "authenticator_security_group" { default = null } -variable "enable_release_channel" { - type = bool - description = "(Beta) Whether release channel is configured for this cluster." - default = false -} - variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 81b887dac9..18697cf134 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -145,7 +145,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. 
| bool | `"true"` | no | | enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | | enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | -| enable\_release\_channel | (Beta) Whether release channel is configured for this cluster. | bool | `"false"` | no | | enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -219,7 +218,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | release\_channel | The release channel of this cluster | -| release\_channel\_enabled | Whether release channel is enabled | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 304fcc8df3..073affb113 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -41,12 +41,8 @@ resource "google_container_cluster" "primary" { } } - dynamic "release_channel" { - for_each = local.release_channel - - content { - channel = release_channel.value.channel - } + release_channel { + channel = var.release_channel } subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index ec0afeda3b..f0477f9498 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -44,8 +44,6 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal - release_channel = var.enable_release_channel ? 
[{ channel : var.release_channel }] : [] - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index 31a22b2175..956c8c2d5d 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -149,11 +149,6 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } -output "release_channel_enabled" { - description = "Whether release channel is enabled" - value = var.enable_release_channel -} - output "release_channel" { description = "The release channel of this cluster" value = var.release_channel diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 9bf971416a..ae89886ea7 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -393,12 +393,6 @@ variable "authenticator_security_group" { default = null } -variable "enable_release_channel" { - type = bool - description = "(Beta) Whether release channel is configured for this cluster." - default = false -} - variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index aba5e2d79f..2bd1c40d14 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -45,7 +45,6 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? 
local.node_version_regional : local.node_version_zonal - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index aba5e2d79f..2bd1c40d14 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -45,7 +45,6 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal - custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id From 42a0376309903e225c7811ff7bc3a35e8685d908 Mon Sep 17 00:00:00 2001 From: Chris Sng Date: Tue, 29 Oct 2019 14:05:05 +0800 Subject: [PATCH 82/82] exclude release_channel block if null --- autogen/cluster.tf | 8 ++++++-- autogen/main.tf | 4 ++++ autogen/variables.tf | 2 +- main.tf | 1 + modules/beta-private-cluster-update-variant/README.md | 2 +- modules/beta-private-cluster-update-variant/cluster.tf | 8 ++++++-- modules/beta-private-cluster-update-variant/main.tf | 2 ++ modules/beta-private-cluster-update-variant/variables.tf | 2 +- modules/beta-private-cluster/README.md | 2 +- modules/beta-private-cluster/cluster.tf | 8 ++++++-- modules/beta-private-cluster/main.tf | 2 ++ modules/beta-private-cluster/variables.tf | 2 +- modules/beta-public-cluster/README.md | 2 +- modules/beta-public-cluster/cluster.tf | 8 ++++++-- modules/beta-public-cluster/main.tf | 2 ++ modules/beta-public-cluster/variables.tf | 2 +- modules/private-cluster-update-variant/main.tf | 1 + modules/private-cluster/main.tf | 1 + 18 files changed, 44 insertions(+), 15 deletions(-) diff --git a/autogen/cluster.tf 
b/autogen/cluster.tf index 0742fd1523..23d9a6b5ff 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -46,8 +46,12 @@ resource "google_container_cluster" "primary" { } {% if beta_cluster %} - release_channel { - channel = var.release_channel + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } } {% endif %} diff --git a/autogen/main.tf b/autogen/main.tf index afbd7bf8c1..30347b9b15 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -48,6 +48,10 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal +{% if beta_cluster %} + release_channel = var.release_channel != null ? [{ channel : var.release_channel }] : [] +{% endif %} + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/autogen/variables.tf b/autogen/variables.tf index cbc9a9b4d6..3f2a12f3a9 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -425,6 +425,6 @@ variable "authenticator_security_group" { variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." - default = "UNSPECIFIED" + default = null } {% endif %} diff --git a/main.tf b/main.tf index a9e1c15810..1090227fd8 100644 --- a/main.tf +++ b/main.tf @@ -45,6 +45,7 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? 
local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index fda632e3c4..7a3be69fc4 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -191,7 +191,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | -| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"null"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. 
| string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 8624bdd4ec..366280d7b8 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -41,8 +41,12 @@ resource "google_container_cluster" "primary" { } } - release_channel { - channel = var.release_channel + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } } subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index 63bf31ac78..2de95c063d 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -44,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 4d403b2b30..07461351c1 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -420,5 +420,5 @@ variable "authenticator_security_group" { variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." - default = "UNSPECIFIED" + default = null } diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index f56c6957ed..daf98949a1 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -191,7 +191,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | -| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. 
| string | `"null"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 2c29d8b81f..d0cc6d7c20 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -41,8 +41,12 @@ resource "google_container_cluster" "primary" { } } - release_channel { - channel = var.release_channel + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } } subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 63bf31ac78..2de95c063d 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -44,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 4d403b2b30..07461351c1 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -420,5 +420,5 @@ variable "authenticator_security_group" { variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." - default = "UNSPECIFIED" + default = null } diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 18697cf134..98f4526d9f 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -182,7 +182,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | -| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"UNSPECIFIED"` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. 
| string | `"null"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 073affb113..304fcc8df3 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -41,8 +41,12 @@ resource "google_container_cluster" "primary" { } } - release_channel { - channel = var.release_channel + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } } subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index f0477f9498..9668b6f1ea 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -44,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index ae89886ea7..b41e5591b9 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -396,5 +396,5 @@ variable "authenticator_security_group" { variable "release_channel" { type = string description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." - default = "UNSPECIFIED" + default = null } diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index 2bd1c40d14..aba5e2d79f 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -45,6 +45,7 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 2bd1c40d14..aba5e2d79f 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -45,6 +45,7 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id