diff --git a/.kitchen.yml b/.kitchen.yml index 004559322f..9550f5d9b5 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -201,6 +201,19 @@ suites: backend: gcp controls: - gcp + - name: "simple_windows_node_pool" + driver: + root_module_directory: test/fixtures/simple_windows_node_pool + verifier: + systems: + - name: gcloud + backend: local + controls: + - gcloud + - name: gcp + backend: gcp + controls: + - gcp - name: "deploy_service" driver: root_module_directory: test/fixtures/deploy_service diff --git a/README.md b/README.md index aa6c6418b6..7a069f945b 100644 --- a/README.md +++ b/README.md @@ -200,6 +200,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -232,6 +233,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -270,6 +274,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/autogen/main/README.md b/autogen/main/README.md index b2482f977d..b66f50334d 100644 --- a/autogen/main/README.md +++ b/autogen/main/README.md @@ -174,6 +174,9 @@ Then perform the following commands on the root folder: {% if autopilot_cluster != true %} ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -228,7 +231,12 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. 
Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + {% endif %} + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/autogen/main/cluster.tf.tmpl b/autogen/main/cluster.tf.tmpl index 64d3567a61..c7003e2b10 100644 --- a/autogen/main/cluster.tf.tmpl +++ b/autogen/main/cluster.tf.tmpl @@ -500,7 +500,7 @@ locals { # resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at # https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 resource "random_id" "name" { - for_each = local.node_pools + for_each = merge(local.node_pools, local.windows_node_pools) byte_length = 2 prefix = format("%s-", lookup(each.value, "name")) keepers = merge( @@ -569,13 +569,22 @@ resource "random_id" "name" { {% endif %} {% if autopilot_cluster != true %} +{% for i in range(2) %} +{% if i == 0 %} resource "google_container_node_pool" "pools" { +{% else %} +resource "google_container_node_pool" "windows_pools" { +{% endif %} {% if beta_cluster %} provider = google-beta {% else %} provider = google {% endif %} + {% if i == 0 %} for_each = local.node_pools + {% else %} + for_each = local.windows_node_pools + {% endif %} {% if update_variant %} name = { for k, v in random_id.name : k => v.hex }[each.key] {% else %} @@ -756,6 +765,7 @@ resource "google_container_node_pool" "pools" { } } + {% if i == 0 %} dynamic "linux_node_config" { for_each = length(merge( local.node_pools_linux_node_configs_sysctls["all"], @@ -770,6 +780,7 @@ resource "google_container_node_pool" "pools" { } } {% endif %} + {% endif %} shielded_instance_config { enable_secure_boot = lookup(each.value, "enable_secure_boot", false) @@ -790,5 +801,10 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + + {% if i == 1 %} + depends_on = [google_container_node_pool.pools[0]] + {% endif %} } +{% endfor %} {% endif %} diff --git a/autogen/main/main.tf.tmpl b/autogen/main/main.tf.tmpl index f14f295214..c82baaefaf 100644 --- a/autogen/main/main.tf.tmpl +++ b/autogen/main/main.tf.tmpl @@ -52,6 +52,8 @@ locals { // Build a map of maps of node pools from a list of objects node_pool_names = [for np in toset(var.node_pools) : np.name] node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) {% endif %} release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -164,8 +166,15 @@ locals { }] {% if autopilot_cluster != true %} - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) {% endif %} cluster_master_auth_list_layer1 = local.cluster_output_master_auth diff --git a/autogen/main/variables.tf.tmpl b/autogen/main/variables.tf.tmpl index 4d6ca38cb0..b63db64f60 100644 --- a/autogen/main/variables.tf.tmpl +++ b/autogen/main/variables.tf.tmpl @@ -148,6 +148,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/autogen/main/variables_defaults.tf.tmpl b/autogen/main/variables_defaults.tf.tmpl index dd30642eed..1706713eb9 100644 --- a/autogen/main/variables_defaults.tf.tmpl +++ b/autogen/main/variables_defaults.tf.tmpl @@ -28,6 +28,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -38,6 +42,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -48,6 +56,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -58,6 +70,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -68,6 +84,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) {% if beta_cluster %} diff --git a/autogen/safer-cluster/main.tf.tmpl b/autogen/safer-cluster/main.tf.tmpl index 991f6621c6..ffcb63fe90 100644 --- a/autogen/safer-cluster/main.tf.tmpl +++ b/autogen/safer-cluster/main.tf.tmpl @@ -87,6 +87,7 @@ module "gke" { initial_node_count = (var.initial_node_count == 0) ? 
1 : var.initial_node_count node_pools = var.node_pools + windows_node_pools = var.windows_node_pools node_pools_labels = var.node_pools_labels node_pools_metadata = var.node_pools_metadata node_pools_taints = var.node_pools_taints diff --git a/autogen/safer-cluster/variables.tf.tmpl b/autogen/safer-cluster/variables.tf.tmpl index ee1a1b7ff3..3a9b93148c 100644 --- a/autogen/safer-cluster/variables.tf.tmpl +++ b/autogen/safer-cluster/variables.tf.tmpl @@ -152,6 +152,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index a20e033e2b..e47379503d 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -236,6 +236,21 @@ steps: - verify beta-cluster-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy beta-cluster-local'] +- id: converge simple-windows-node-pool-local + waitFor: + - create all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-windows-node-pool-local'] +- id: verify simple-windows-node-pool-local + waitFor: + - converge simple-windows-node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-windows-node-pool-local'] +- id: destroy simple-windows-node-pool-local + waitFor: + - verify simple-windows-node-pool-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-windows-node-pool-local'] - id: converge deploy-service-local waitFor: - create all diff --git a/cluster.tf b/cluster.tf index a6be1e50e7..0d67bf5bf6 100644 --- a/cluster.tf +++ b/cluster.tf @@ -296,6 +296,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google for_each = local.node_pools + name = each.key project = var.project_id location = local.location @@ -443,4 +444,159 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google + for_each = local.windows_node_pools + + name = each.key + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? "" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? 
lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? [each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? 
[{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/examples/simple_windows_node_pool/README.md b/examples/simple_windows_node_pool/README.md new file mode 100644 index 0000000000..db0c89092d --- /dev/null +++ b/examples/simple_windows_node_pool/README.md @@ -0,0 +1,41 @@ +# Simple Regional Cluster + +This example illustrates how to create a simple cluster with a windows node pool. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | `string` | `""` | no | +| project\_id | The project ID to host the cluster in | `any` | n/a | yes | +| region | The region to host the cluster in | `string` | `"us-central1"` | no | +| zone | The zone to host the cluster in | `string` | `"us-central1-a"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | n/a | +| client\_token | n/a | +| cluster\_name | Cluster name | +| identity\_namespace | n/a | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | n/a | +| location | n/a | +| master\_kubernetes\_version | The master Kubernetes version | +| network | n/a | +| project\_id | n/a | +| region | n/a | +| service\_account | The default service account used for running nodes. | +| subnetwork | n/a | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/simple_windows_node_pool/main.tf b/examples/simple_windows_node_pool/main.tf new file mode 100644 index 0000000000..0b63ed502a --- /dev/null +++ b/examples/simple_windows_node_pool/main.tf @@ -0,0 +1,67 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + cluster_type = "simple-windows-node-pool" +} + +data "google_client_config" "default" {} + +provider "kubernetes" { + host = "https://${module.gke.endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(module.gke.ca_certificate) +} + +module "gke" { + source = "../../modules/beta-public-cluster/" + project_id = var.project_id + regional = false + region = var.region + zones = [var.zone] + + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + + remove_default_node_pool = true + service_account = "create" + release_channel = "REGULAR" + + node_pools = [ + { + name = "pool-01" + autoscaling = false + auto_upgrade = true + node_count = 1 + machine_type = "n2-standard-2" + }, + ] + + windows_node_pools = [ + { + name = "win-pool-01" + autoscaling = false + auto_upgrade = true + node_count = 1 + machine_type = "n2-standard-2" + image_type = "WINDOWS_LTSC" + }, + ] +} diff --git a/examples/simple_windows_node_pool/network.tf b/examples/simple_windows_node_pool/network.tf new file mode 100644 index 0000000000..2e5daccc46 --- /dev/null +++ b/examples/simple_windows_node_pool/network.tf @@ -0,0 +1,45 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false + project = var.project_id +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + project = var.project_id + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} diff --git a/examples/simple_windows_node_pool/outputs.tf b/examples/simple_windows_node_pool/outputs.tf new file mode 100644 index 0000000000..594fdafaa2 --- /dev/null +++ b/examples/simple_windows_node_pool/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate + sensitive = true +} + +output "service_account" { + description = "The default service account used for running nodes." + value = module.gke.service_account +} diff --git a/examples/simple_windows_node_pool/test_outputs.tf b/examples/simple_windows_node_pool/test_outputs.tf new file mode 100644 index 0000000000..c6c0a5013f --- /dev/null +++ b/examples/simple_windows_node_pool/test_outputs.tf @@ -0,0 +1,67 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = google_compute_network.main.name +} + +output "subnetwork" { + value = google_compute_subnetwork.main.name +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = google_compute_subnetwork.main.secondary_ip_range[0].range_name +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = google_compute_subnetwork.main.secondary_ip_range[1].range_name +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} + +output "identity_namespace" { + value = module.gke.identity_namespace +} diff --git a/examples/simple_windows_node_pool/variables.tf b/examples/simple_windows_node_pool/variables.tf new file mode 100644 index 0000000000..c02931ccd9 --- /dev/null +++ b/examples/simple_windows_node_pool/variables.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" + default = "us-central1" +} + +variable "zone" { + type = string + description = "The zone to host the cluster in" + default = "us-central1-a" +} diff --git a/examples/simple_windows_node_pool/versions.tf b/examples/simple_windows_node_pool/versions.tf new file mode 100644 index 0000000000..9d7a496483 --- /dev/null +++ b/examples/simple_windows_node_pool/versions.tf @@ -0,0 +1,31 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.13" + required_providers { + google = { + source = "hashicorp/google" + } + google-beta = { + source = "hashicorp/google-beta" + version = "~> 4.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + } + } +} diff --git a/main.tf b/main.tf index 8eb8b7c473..978775e0ee 100644 --- a/main.tf +++ b/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -113,8 +115,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-autopilot-private-cluster/README.md b/modules/beta-autopilot-private-cluster/README.md index bf0188ffc4..084ac9c49c 100644 --- a/modules/beta-autopilot-private-cluster/README.md +++ b/modules/beta-autopilot-private-cluster/README.md @@ -166,6 +166,7 @@ Then perform the following commands on the root folder: + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/beta-autopilot-public-cluster/README.md b/modules/beta-autopilot-public-cluster/README.md index d0abf02521..43d933088f 100644 --- a/modules/beta-autopilot-public-cluster/README.md +++ b/modules/beta-autopilot-public-cluster/README.md @@ -153,6 +153,7 @@ Then perform the following commands on the root folder: + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index cacb841390..a5969fef1c 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -258,6 +258,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -300,6 +301,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -346,6 +350,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. 
Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 34d1a1e1cf..6e0383677d 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -438,7 +438,7 @@ locals { # resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at # https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 resource "random_id" "name" { - for_each = local.node_pools + for_each = merge(local.node_pools, local.windows_node_pools) byte_length = 2 prefix = format("%s-", lookup(each.value, "name")) keepers = merge( @@ -508,6 +508,7 @@ resource "random_id" "name" { resource "google_container_node_pool" "pools" { provider = google-beta for_each = local.node_pools + name = { for k, v in random_id.name : k => v.hex }[each.key] project = var.project_id location = local.location @@ -710,4 +711,201 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google-beta + for_each = local.windows_node_pools + + name = { for k, v in random_id.name : k => v.hex }[each.key] + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? "" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? [each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + dynamic "placement_policy" { + for_each = length(lookup(each.value, "placement_policy", "")) > 0 ? [each.value] : [] + content { + type = lookup(placement_policy.value, "placement_policy", null) + } + } + + dynamic "network_config" { + for_each = length(lookup(each.value, "pod_range", "")) > 0 ? 
[each.value] : [] + content { + pod_range = lookup(network_config.value, "pod_range", null) + } + } + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + dynamic "ephemeral_storage_config" { + for_each = lookup(each.value, "local_ssd_ephemeral_count", 0) > 0 ? [each.value.local_ssd_ephemeral_count] : [] + content { + local_ssd_count = ephemeral_storage_config.value + } + } + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? 
[{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + dynamic "sandbox_config" { + for_each = tobool((lookup(each.value, "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : [] + content { + sandbox_type = sandbox_config.value + } + } + + boot_disk_kms_key = lookup(each.value, "boot_disk_kms_key", "") + + dynamic "kubelet_config" { + for_each = length(setintersection( + keys(each.value), + ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period"] + )) != 0 ? [1] : [] + + content { + cpu_manager_policy = lookup(each.value, "cpu_manager_policy", "static") + cpu_cfs_quota = lookup(each.value, "cpu_cfs_quota", null) + cpu_cfs_quota_period = lookup(each.value, "cpu_cfs_quota_period", null) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + create_before_destroy = true + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index 1a56147441..48a24cd483 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -138,8 +140,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index 94ef3de293..6fb862ef97 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -147,6 +147,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/beta-private-cluster-update-variant/variables_defaults.tf b/modules/beta-private-cluster-update-variant/variables_defaults.tf index ee5d60e6c7..cc65ac9e8b 100644 --- a/modules/beta-private-cluster-update-variant/variables_defaults.tf +++ b/modules/beta-private-cluster-update-variant/variables_defaults.tf @@ -27,6 +27,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -37,6 +41,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -47,6 +55,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -57,6 +69,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -67,6 +83,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index cf3fe7381c..3e40ef386c 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -236,6 +236,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) 
| `string` | n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -278,6 +279,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -324,6 +328,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 37a9b6b8ef..2c12b36606 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -417,6 +417,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google-beta for_each = local.node_pools + name = each.key project = var.project_id location = local.location @@ -618,4 +619,200 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google-beta + for_each = local.windows_node_pools + + name = each.key + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? "" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? 
[each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + dynamic "placement_policy" { + for_each = length(lookup(each.value, "placement_policy", "")) > 0 ? [each.value] : [] + content { + type = lookup(placement_policy.value, "placement_policy", null) + } + } + + dynamic "network_config" { + for_each = length(lookup(each.value, "pod_range", "")) > 0 ? [each.value] : [] + content { + pod_range = lookup(network_config.value, "pod_range", null) + } + } + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + dynamic "ephemeral_storage_config" { + for_each = lookup(each.value, "local_ssd_ephemeral_count", 0) > 0 ? [each.value.local_ssd_ephemeral_count] : [] + content { + local_ssd_count = ephemeral_storage_config.value + } + } + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? 
[{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + dynamic "sandbox_config" { + for_each = tobool((lookup(each.value, "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : [] + content { + sandbox_type = sandbox_config.value + } + } + + boot_disk_kms_key = lookup(each.value, "boot_disk_kms_key", "") + + dynamic "kubelet_config" { + for_each = length(setintersection( + keys(each.value), + ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period"] + )) != 0 ? [1] : [] + + content { + cpu_manager_policy = lookup(each.value, "cpu_manager_policy", "static") + cpu_cfs_quota = lookup(each.value, "cpu_cfs_quota", null) + cpu_cfs_quota_period = lookup(each.value, "cpu_cfs_quota_period", null) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 1a56147441..48a24cd483 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -138,8 +140,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 94ef3de293..6fb862ef97 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -147,6 +147,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/beta-private-cluster/variables_defaults.tf b/modules/beta-private-cluster/variables_defaults.tf index ee5d60e6c7..cc65ac9e8b 100644 --- a/modules/beta-private-cluster/variables_defaults.tf +++ b/modules/beta-private-cluster/variables_defaults.tf @@ -27,6 +27,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -37,6 +41,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -47,6 +55,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -57,6 +69,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -67,6 +83,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) diff --git a/modules/beta-public-cluster-update-variant/README.md b/modules/beta-public-cluster-update-variant/README.md index bc6a5bdfcd..c384129607 100644 --- a/modules/beta-public-cluster-update-variant/README.md +++ b/modules/beta-public-cluster-update-variant/README.md @@ -247,6 +247,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster 
operations. | `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -287,6 +288,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -333,6 +337,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/beta-public-cluster-update-variant/cluster.tf b/modules/beta-public-cluster-update-variant/cluster.tf index 68ef8dad72..908e346eb0 100644 --- a/modules/beta-public-cluster-update-variant/cluster.tf +++ b/modules/beta-public-cluster-update-variant/cluster.tf @@ -419,7 +419,7 @@ locals { # resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at # https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 resource "random_id" "name" { - for_each = local.node_pools + for_each = merge(local.node_pools, local.windows_node_pools) byte_length = 2 prefix = format("%s-", lookup(each.value, "name")) keepers = merge( @@ -489,6 +489,7 @@ resource "random_id" "name" { resource "google_container_node_pool" "pools" { provider = google-beta for_each = local.node_pools + name = { for k, v in random_id.name : k => v.hex }[each.key] project = var.project_id location = local.location @@ -691,4 +692,201 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google-beta + for_each = local.windows_node_pools + + name = { for k, v in random_id.name : k => v.hex }[each.key] + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? 
"" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? [each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + dynamic "placement_policy" { + for_each = length(lookup(each.value, "placement_policy", "")) > 0 ? [each.value] : [] + content { + type = lookup(placement_policy.value, "placement_policy", null) + } + } + + dynamic "network_config" { + for_each = length(lookup(each.value, "pod_range", "")) > 0 ? [each.value] : [] + content { + pod_range = lookup(network_config.value, "pod_range", null) + } + } + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + dynamic "ephemeral_storage_config" { + for_each = lookup(each.value, "local_ssd_ephemeral_count", 0) > 0 ? 
[each.value.local_ssd_ephemeral_count] : [] + content { + local_ssd_count = ephemeral_storage_config.value + } + } + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + dynamic "sandbox_config" { + for_each = tobool((lookup(each.value, "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : [] + content { + sandbox_type = sandbox_config.value + } + } + + boot_disk_kms_key = lookup(each.value, "boot_disk_kms_key", "") + + dynamic "kubelet_config" { + for_each = length(setintersection( + keys(each.value), + ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period"] + )) != 0 ? [1] : [] + + content { + cpu_manager_policy = lookup(each.value, "cpu_manager_policy", "static") + cpu_cfs_quota = lookup(each.value, "cpu_cfs_quota", null) + cpu_cfs_quota_period = lookup(each.value, "cpu_cfs_quota_period", null) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + create_before_destroy = true + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/modules/beta-public-cluster-update-variant/main.tf b/modules/beta-public-cluster-update-variant/main.tf index 6c48ce14a6..b0e1993666 100644 --- a/modules/beta-public-cluster-update-variant/main.tf +++ b/modules/beta-public-cluster-update-variant/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -137,8 +139,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-public-cluster-update-variant/variables.tf b/modules/beta-public-cluster-update-variant/variables.tf index 1a88cb0192..6b0dbdd42b 100644 --- a/modules/beta-public-cluster-update-variant/variables.tf +++ b/modules/beta-public-cluster-update-variant/variables.tf @@ -147,6 +147,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/beta-public-cluster-update-variant/variables_defaults.tf b/modules/beta-public-cluster-update-variant/variables_defaults.tf index ee5d60e6c7..cc65ac9e8b 100644 --- a/modules/beta-public-cluster-update-variant/variables_defaults.tf +++ b/modules/beta-public-cluster-update-variant/variables_defaults.tf @@ -27,6 +27,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -37,6 +41,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -47,6 +55,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -57,6 +69,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -67,6 +83,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index a2d47544ac..ac5ddfa7bd 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -225,6 +225,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) | `string` | 
n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -265,6 +266,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -311,6 +315,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 345aa6013f..e87bc9603f 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -398,6 +398,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google-beta for_each = local.node_pools + name = each.key project = var.project_id location = local.location @@ -599,4 +600,200 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google-beta + for_each = local.windows_node_pools + + name = each.key + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? "" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? 
[each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + dynamic "placement_policy" { + for_each = length(lookup(each.value, "placement_policy", "")) > 0 ? [each.value] : [] + content { + type = lookup(placement_policy.value, "placement_policy", null) + } + } + + dynamic "network_config" { + for_each = length(lookup(each.value, "pod_range", "")) > 0 ? [each.value] : [] + content { + pod_range = lookup(network_config.value, "pod_range", null) + } + } + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + dynamic "ephemeral_storage_config" { + for_each = lookup(each.value, "local_ssd_ephemeral_count", 0) > 0 ? [each.value.local_ssd_ephemeral_count] : [] + content { + local_ssd_count = ephemeral_storage_config.value + } + } + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? 
[{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + dynamic "sandbox_config" { + for_each = tobool((lookup(each.value, "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : [] + content { + sandbox_type = sandbox_config.value + } + } + + boot_disk_kms_key = lookup(each.value, "boot_disk_kms_key", "") + + dynamic "kubelet_config" { + for_each = length(setintersection( + keys(each.value), + ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period"] + )) != 0 ? [1] : [] + + content { + cpu_manager_policy = lookup(each.value, "cpu_manager_policy", "static") + cpu_cfs_quota = lookup(each.value, "cpu_cfs_quota", null) + cpu_cfs_quota_period = lookup(each.value, "cpu_cfs_quota_period", null) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index 6c48ce14a6..b0e1993666 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -137,8 +139,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 1a88cb0192..6b0dbdd42b 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -147,6 +147,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/beta-public-cluster/variables_defaults.tf b/modules/beta-public-cluster/variables_defaults.tf index ee5d60e6c7..cc65ac9e8b 100644 --- a/modules/beta-public-cluster/variables_defaults.tf +++ b/modules/beta-public-cluster/variables_defaults.tf @@ -27,6 +27,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -37,6 +41,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -47,6 +55,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -57,6 +69,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -67,6 +83,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 1473c63425..afcca1ddcc 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -232,6 +232,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster operations. 
| `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -266,6 +267,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -304,6 +308,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index f5e49e1902..870c3bd6c4 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -330,7 +330,7 @@ locals { # resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at # https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 resource "random_id" "name" { - for_each = local.node_pools + for_each = merge(local.node_pools, local.windows_node_pools) byte_length = 2 prefix = format("%s-", lookup(each.value, "name")) keepers = merge( @@ -400,6 +400,161 @@ resource "random_id" "name" { resource "google_container_node_pool" "pools" { provider = google for_each = local.node_pools + + name = { for k, v in random_id.name : k => v.hex }[each.key] + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? "" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? 
[each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? 
[{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + create_before_destroy = true + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google + for_each = local.windows_node_pools + name = { for k, v in random_id.name : k => v.hex }[each.key] project = var.project_id location = local.location @@ -548,4 +703,6 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index 7051be0fc0..a01735b156 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -114,8 +116,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index d8358185ce..d929ed1ffa 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -147,6 +147,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/private-cluster-update-variant/variables_defaults.tf b/modules/private-cluster-update-variant/variables_defaults.tf index e7f52e3d4b..b570f5f850 100644 --- a/modules/private-cluster-update-variant/variables_defaults.tf +++ b/modules/private-cluster-update-variant/variables_defaults.tf @@ -27,6 +27,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -37,6 +41,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -47,6 +55,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -57,6 +69,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -67,6 +83,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) } diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 1d2c5c60be..049bf6aaf4 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -210,6 +210,7 @@ Then perform the following commands on the root folder: | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster 
operations. | `map(string)` | `{}` | no | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing Windows node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | `list(string)` | `[]` | no | ## Outputs @@ -244,6 +245,9 @@ Then perform the following commands on the root folder: ## node_pools variable + +> Use this variable for provisioning linux based node pools. For Windows based node pools use [windows_node_pools](#windows\_node\_pools-variable) + The node_pools variable takes the following parameters: | Name | Description | Default | Requirement | @@ -282,6 +286,11 @@ The node_pools variable takes the following parameters: | tags | The list of instance tags applied to all nodes | | Required | | value | The value for the taint | | Required | | version | The Kubernetes version for the nodes in this pool. Should only be set if auto_upgrade is false | " " | Optional | + +## windows_node_pools variable +The windows_node_pools variable takes the same parameters as [node_pools](#node\_pools-variable) but is reserved for provisioning Windows based node pools only. This variable is introduced to satisfy a [specific requirement](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows#create_a_cluster_and_node_pools) for the presence of at least one linux based node pool in the cluster before a windows based node pool can be created. + + ## Requirements Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 10aab672e4..281ab3d029 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -309,6 +309,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "pools" { provider = google for_each = local.node_pools + name = each.key project = var.project_id location = local.location @@ -456,4 +457,159 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } + +} +resource "google_container_node_pool" "windows_pools" { + provider = google + for_each = local.windows_node_pools + + name = each.key + project = var.project_id + location = local.location + // use node_locations if provided, defaults to cluster level node_locations if not specified + node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null + + cluster = google_container_cluster.primary.name + + version = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) ? "" : lookup( + each.value, + "version", + google_container_cluster.primary.min_master_version, + ) + + initial_node_count = lookup(each.value, "autoscaling", true) ? lookup( + each.value, + "initial_node_count", + lookup(each.value, "min_count", 1) + ) : null + + max_pods_per_node = lookup(each.value, "max_pods_per_node", null) + + node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1) + + dynamic "autoscaling" { + for_each = lookup(each.value, "autoscaling", true) ? 
[each.value] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + + management { + auto_repair = lookup(each.value, "auto_repair", true) + auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade) + } + + upgrade_settings { + max_surge = lookup(each.value, "max_surge", 1) + max_unavailable = lookup(each.value, "max_unavailable", 0) + } + + node_config { + image_type = lookup(each.value, "image_type", "COS_CONTAINERD") + machine_type = lookup(each.value, "machine_type", "e2-medium") + min_cpu_platform = lookup(each.value, "min_cpu_platform", "") + dynamic "gcfs_config" { + for_each = lookup(each.value, "enable_gcfs", false) ? [true] : [] + content { + enabled = gcfs_config.value + } + } + dynamic "gvnic" { + for_each = lookup(each.value, "enable_gvnic", false) ? [true] : [] + content { + enabled = gvnic.value + } + } + labels = merge( + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[each.value["name"]], + ) + metadata = merge( + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[each.value["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + local.node_pools_taints["all"], + local.node_pools_taints[each.value["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[each.value["name"]], + ) + + local_ssd_count = lookup(each.value, "local_ssd_count", 0) + disk_size_gb = lookup(each.value, "disk_size_gb", 100) + disk_type = lookup(each.value, "disk_type", "pd-standard") + + + service_account = lookup( + each.value, + "service_account", + local.service_account, + ) + preemptible = lookup(each.value, "preemptible", false) + spot = lookup(each.value, "spot", false) + + oauth_scopes = concat( + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[each.value["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? 
[{ + type = lookup(each.value, "accelerator_type", "") + count = lookup(each.value, "accelerator_count", 0) + gpu_partition_size = lookup(each.value, "gpu_partition_size", null) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + gpu_partition_size = guest_accelerator["gpu_partition_size"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + mode = lookup(each.value, "node_metadata", workload_metadata_config.value.mode) + } + } + + + shielded_instance_config { + enable_secure_boot = lookup(each.value, "enable_secure_boot", false) + enable_integrity_monitoring = lookup(each.value, "enable_integrity_monitoring", true) + } + } + + lifecycle { + ignore_changes = [initial_node_count] + + } + + timeouts { + create = lookup(var.timeouts, "create", "45m") + update = lookup(var.timeouts, "update", "45m") + delete = lookup(var.timeouts, "delete", "45m") + } + + depends_on = [google_container_node_pool.pools[0]] } diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 7051be0fc0..a01735b156 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -45,8 +45,10 @@ locals { master_version_zonal = var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.zone.latest_master_version master_version = var.regional ? local.master_version_regional : local.master_version_zonal // Build a map of maps of node pools from a list of objects - node_pool_names = [for np in toset(var.node_pools) : np.name] - node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + node_pool_names = [for np in toset(var.node_pools) : np.name] + node_pools = zipmap(local.node_pool_names, tolist(toset(var.node_pools))) + windows_node_pool_names = [for np in toset(var.windows_node_pools) : np.name] + windows_node_pools = zipmap(local.windows_node_pool_names, tolist(toset(var.windows_node_pools))) release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] @@ -114,8 +116,15 @@ locals { cidr_blocks : var.master_authorized_networks }] - cluster_output_node_pools_names = concat([for np in google_container_node_pool.pools : np.name], [""]) - cluster_output_node_pools_versions = { for np in google_container_node_pool.pools : np.name => np.version } + cluster_output_node_pools_names = concat( + [for np in google_container_node_pool.pools : np.name], [""], + [for np in google_container_node_pool.windows_pools : np.name], [""] + ) + + cluster_output_node_pools_versions = merge( + { for np in google_container_node_pool.pools : np.name => np.version }, + { for np in google_container_node_pool.windows_pools : np.name => np.version }, + ) cluster_master_auth_list_layer1 = local.cluster_output_master_auth cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index d8358185ce..d929ed1ffa 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -147,6 +147,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing Windows node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/private-cluster/variables_defaults.tf b/modules/private-cluster/variables_defaults.tf index e7f52e3d4b..b570f5f850 100644 --- a/modules/private-cluster/variables_defaults.tf +++ b/modules/private-cluster/variables_defaults.tf @@ -27,6 +27,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_labels ) @@ -37,6 +41,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : {}] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : {}] + ), var.node_pools_metadata ) @@ -47,6 +55,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_taints ) @@ -57,6 +69,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_tags ) @@ -67,6 +83,10 @@ locals { [for node_pool in var.node_pools : node_pool["name"]], [for node_pool in var.node_pools : []] ), + zipmap( + [for node_pool in var.windows_node_pools : node_pool["name"]], + [for node_pool in var.windows_node_pools : []] + ), var.node_pools_oauth_scopes ) } diff --git a/modules/safer-cluster-update-variant/README.md b/modules/safer-cluster-update-variant/README.md index 6bd48704b4..b0ed558724 100644 --- a/modules/safer-cluster-update-variant/README.md +++ b/modules/safer-cluster-update-variant/README.md @@ -268,6 +268,7 @@ For simplicity, we suggest using `roles/container.admin` and | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | | subnetwork | The 
subnetwork to host the cluster in | `string` | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in | `list(string)` | `[]` | no | ## Outputs diff --git a/modules/safer-cluster-update-variant/main.tf b/modules/safer-cluster-update-variant/main.tf index 4d76e770ad..8917d86937 100644 --- a/modules/safer-cluster-update-variant/main.tf +++ b/modules/safer-cluster-update-variant/main.tf @@ -83,6 +83,7 @@ module "gke" { initial_node_count = (var.initial_node_count == 0) ? 1 : var.initial_node_count node_pools = var.node_pools + windows_node_pools = var.windows_node_pools node_pools_labels = var.node_pools_labels node_pools_metadata = var.node_pools_metadata node_pools_taints = var.node_pools_taints diff --git a/modules/safer-cluster-update-variant/variables.tf b/modules/safer-cluster-update-variant/variables.tf index 30013eb196..1429ae6855 100644 --- a/modules/safer-cluster-update-variant/variables.tf +++ b/modules/safer-cluster-update-variant/variables.tf @@ -152,6 +152,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/modules/safer-cluster/README.md b/modules/safer-cluster/README.md index 6bd48704b4..b0ed558724 100644 --- a/modules/safer-cluster/README.md +++ b/modules/safer-cluster/README.md @@ -268,6 +268,7 @@ For simplicity, we suggest using `roles/container.admin` and | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | | subnetwork | The subnetwork to host the cluster in | `string` | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | `list(string)` | `[]` | no | +| windows\_node\_pools | List of maps containing node pools | `list(map(string))` | `[]` | no | | zones | The zones to host the cluster in | `list(string)` | `[]` | no | ## Outputs diff --git a/modules/safer-cluster/main.tf b/modules/safer-cluster/main.tf index ea2b2d4b0e..1e5361160d 100644 --- a/modules/safer-cluster/main.tf +++ b/modules/safer-cluster/main.tf @@ -83,6 +83,7 @@ module "gke" { initial_node_count = (var.initial_node_count == 0) ? 
1 : var.initial_node_count node_pools = var.node_pools + windows_node_pools = var.windows_node_pools node_pools_labels = var.node_pools_labels node_pools_metadata = var.node_pools_metadata node_pools_taints = var.node_pools_taints diff --git a/modules/safer-cluster/variables.tf b/modules/safer-cluster/variables.tf index 30013eb196..1429ae6855 100644 --- a/modules/safer-cluster/variables.tf +++ b/modules/safer-cluster/variables.tf @@ -152,6 +152,12 @@ variable "node_pools" { ] } +variable "windows_node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + default = [] +} + variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" diff --git a/test/fixtures/simple_windows_node_pool/main.tf b/test/fixtures/simple_windows_node_pool/main.tf new file mode 100644 index 0000000000..c912a24259 --- /dev/null +++ b/test/fixtures/simple_windows_node_pool/main.tf @@ -0,0 +1,20 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "this" { + source = "../../../examples/simple_windows_node_pool" + project_id = var.project_ids[0] +} diff --git a/test/fixtures/simple_windows_node_pool/outputs.tf b/test/fixtures/simple_windows_node_pool/outputs.tf new file mode 100644 index 0000000000..139e4ca1d3 --- /dev/null +++ b/test/fixtures/simple_windows_node_pool/outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "project_id" { + value = module.this.project_id +} + +output "region" { + value = module.this.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.this.cluster_name +} + +output "location" { + value = module.this.location +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.this.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.this.master_kubernetes_version +} + +output "kubernetes_endpoint" { + sensitive = true + value = module.this.kubernetes_endpoint +} + +output "client_token" { + sensitive = true + value = module.this.client_token +} + +output "ca_certificate" { + description = "The cluster CA certificate" + value = module.this.ca_certificate + sensitive = true +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." 
+  value       = module.this.service_account
+}
diff --git a/test/fixtures/simple_windows_node_pool/variables.tf b/test/fixtures/simple_windows_node_pool/variables.tf
new file mode 100644
index 0000000000..62cc91a14d
--- /dev/null
+++ b/test/fixtures/simple_windows_node_pool/variables.tf
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+variable "project_ids" {
+  type        = list(string)
+  description = "The GCP projects to use for integration tests"
+}
diff --git a/test/integration/simple_windows_node_pool/controls/gcloud.rb b/test/integration/simple_windows_node_pool/controls/gcloud.rb
new file mode 100644
index 0000000000..f3c41eb593
--- /dev/null
+++ b/test/integration/simple_windows_node_pool/controls/gcloud.rb
@@ -0,0 +1,203 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
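+
+# This control describes the fixture cluster via `gcloud container clusters
+# describe` and asserts the shape exercised by the windows_node_pools change:
+# two non-default node pools are expected, a Linux "pool-01" and a Windows
+# "win-pool-01" built from the WINDOWS_LTSC image, both sized and tagged the
+# same way and running as the cluster's Terraform-managed service account.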
+
+project_id = attribute('project_id')
+location = attribute('location')
+cluster_name = attribute('cluster_name')
+service_account = attribute('service_account')
+
+control "gcloud" do
+  title "Google Compute Engine GKE configuration"
+  describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do
+    its(:exit_status) { should eq 0 }
+    its(:stderr) { should eq '' }
+
+    let!(:data) do
+      if subject.exit_status == 0
+        JSON.parse(subject.stdout)
+      else
+        {}
+      end
+    end
+
+    describe "cluster" do
+      it "is running" do
+        expect(data['status']).to eq 'RUNNING'
+      end
+
+      it "is zonal" do
+        expect(data['location']).to match(/^.*[1-9]-[a-z]$/)
+      end
+
+      it "is single zoned" do
+        expect(data['locations'].size).to eq 1
+      end
+
+      it "has the release channel set to REGULAR" do
+        expect(data['releaseChannel']['channel']).to eq "REGULAR"
+      end
+    end
+
+    describe "node pool" do
+      let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } }
+
+      it "has 2 node pools" do
+        expect(node_pools.count).to eq 2
+      end
+
+      describe "pool-01" do
+        it "exists" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "pool-01",
+            )
+          )
+        end
+
+        it "uses an automatically created service account" do
+          expect(node_pools).to include(
+            including(
+              "name" => "pool-01",
+              "config" => including(
+                "serviceAccount" => service_account,
+              ),
+            ),
+          )
+        end
+
+        it "has the node count set to 1" do
+          expect(node_pools).to include(
+            including(
+              "name" => "pool-01",
+              "initialNodeCount" => 1,
+            )
+          )
+        end
+
+        it "is the expected machine type" do
+          expect(node_pools).to include(
+            including(
+              "name" => "pool-01",
+              "config" => including(
+                "machineType" => "n2-standard-2",
+              ),
+            )
+          )
+        end
+
+        it "has the expected network tags" do
+          expect(node_pools).to include(
+            including(
+              "name" => "pool-01",
+              "config" => including(
+                "tags" => match_array([
+                  "gke-#{cluster_name}",
+                  "gke-#{cluster_name}-pool-01",
+                ]),
+              ),
+            )
+          )
+        end
+
+        it "has autoupgrade enabled" do
+          expect(node_pools).to include(
+            including(
+              "name" => "pool-01",
+              "management" => including(
+                "autoUpgrade" => true,
+              ),
+            )
+          )
+        end
+      end
+
+      describe "win-pool-01" do
+        it "exists" do
+          expect(data['nodePools']).to include(
+            including(
+              "name" => "win-pool-01",
+            )
+          )
+        end
+
+        it "uses an automatically created service account" do
+          expect(node_pools).to include(
+            including(
+              "name" => "win-pool-01",
+              "config" => including(
+                "serviceAccount" => service_account,
+              ),
+            ),
+          )
+        end
+
+        it "has the node count set to 1" do
+          expect(node_pools).to include(
+            including(
+              "name" => "win-pool-01",
+              "initialNodeCount" => 1,
+            )
+          )
+        end
+
+        it "is the expected machine type" do
+          expect(node_pools).to include(
+            including(
+              "name" => "win-pool-01",
+              "config" => including(
+                "machineType" => "n2-standard-2",
+              ),
+            )
+          )
+        end
+
+        it "has the expected network tags" do
+          expect(node_pools).to include(
+            including(
+              "name" => "win-pool-01",
+              "config" => including(
+                "tags" => match_array([
+                  "gke-#{cluster_name}",
+                  "gke-#{cluster_name}-win-pool-01",
+                ]),
+              ),
+            )
+          )
+        end
+
+        it "has autoupgrade enabled" do
+          expect(node_pools).to include(
+            including(
+              "name" => "win-pool-01",
+              "management" => including(
+                "autoUpgrade" => true,
+              ),
+            )
+          )
+        end
+
+        it "uses the windows_ltsc image" do
+          expect(node_pools).to include(
+            including(
+              "name" => "win-pool-01",
+              "config" => including(
+                "imageType" => "WINDOWS_LTSC",
+              ),
+            ),
+          )
+        end
+      end
+    end
+  end
+end
diff --git a/test/integration/simple_windows_node_pool/controls/gcp.rb b/test/integration/simple_windows_node_pool/controls/gcp.rb
new file mode 100644
index 0000000000..623a2aaca7
--- /dev/null
+++ b/test/integration/simple_windows_node_pool/controls/gcp.rb
@@ -0,0 +1,25 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+control "gcp" do
+  title "Native InSpec Resources"
+
+  service_account = attribute("service_account")
+  project_id = attribute("project_id")
+
+  describe google_service_account(project: project_id, name: service_account) do
+    its("display_name") { should eq "Terraform-managed service account for cluster #{attribute("cluster_name")}" }
+    its("project_id") { should eq project_id }
+  end
+end
diff --git a/test/integration/simple_windows_node_pool/inspec.yml b/test/integration/simple_windows_node_pool/inspec.yml
new file mode 100644
index 0000000000..81b0476c01
--- /dev/null
+++ b/test/integration/simple_windows_node_pool/inspec.yml
@@ -0,0 +1,50 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
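+
+# Attribute values are wired in by the kitchen-terraform verifier, which maps
+# the Terraform outputs of test/fixtures/simple_windows_node_pool onto InSpec
+# attributes of the same name at `kitchen verify` time.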
+
+name: simple_windows_node_pool
+depends:
+  - name: inspec-gcp
+    git: https://github.com/inspec/inspec-gcp.git
+    tag: v1.8.0
+attributes:
+  - name: project_id
+    required: true
+    type: string
+  - name: location
+    required: true
+    type: string
+  - name: cluster_name
+    required: true
+    type: string
+  - name: master_kubernetes_version
+    required: true
+    type: string
+  - name: kubernetes_endpoint
+    required: true
+    type: string
+  - name: client_token
+    required: true
+    type: string
+  - name: service_account
+    required: true
+    type: string
+  - name: database_encryption_key_name
+    required: true
+    type: string
+  - name: identity_namespace
+    required: true
+    type: string
diff --git a/variables.tf b/variables.tf
index f3b13c565d..8271f51b66 100644
--- a/variables.tf
+++ b/variables.tf
@@ -147,6 +147,12 @@ variable "node_pools" {
   ]
 }
 
+variable "windows_node_pools" {
+  type        = list(map(string))
+  description = "List of maps containing Windows node pools"
+  default     = []
+}
+
 variable "node_pools_labels" {
   type        = map(map(string))
   description = "Map of maps containing node labels by node-pool name"
diff --git a/variables_defaults.tf b/variables_defaults.tf
index e7f52e3d4b..b570f5f850 100644
--- a/variables_defaults.tf
+++ b/variables_defaults.tf
@@ -27,6 +27,10 @@ locals {
       [for node_pool in var.node_pools : node_pool["name"]],
       [for node_pool in var.node_pools : {}]
     ),
+    zipmap(
+      [for node_pool in var.windows_node_pools : node_pool["name"]],
+      [for node_pool in var.windows_node_pools : {}]
+    ),
     var.node_pools_labels
   )
 
@@ -37,6 +41,10 @@
       [for node_pool in var.node_pools : node_pool["name"]],
       [for node_pool in var.node_pools : {}]
     ),
+    zipmap(
+      [for node_pool in var.windows_node_pools : node_pool["name"]],
+      [for node_pool in var.windows_node_pools : {}]
+    ),
     var.node_pools_metadata
   )
 
@@ -47,6 +55,10 @@
       [for node_pool in var.node_pools : node_pool["name"]],
       [for node_pool in var.node_pools : []]
     ),
+    zipmap(
+      [for node_pool in var.windows_node_pools : node_pool["name"]],
+      [for node_pool in var.windows_node_pools : []]
+    ),
     var.node_pools_taints
   )
 
@@ -57,6 +69,10 @@
       [for node_pool in var.node_pools : node_pool["name"]],
       [for node_pool in var.node_pools : []]
    ),
+    zipmap(
+      [for node_pool in var.windows_node_pools : node_pool["name"]],
+      [for node_pool in var.windows_node_pools : []]
+    ),
     var.node_pools_tags
   )
 
@@ -67,6 +83,10 @@
       [for node_pool in var.node_pools : node_pool["name"]],
       [for node_pool in var.node_pools : []]
     ),
+    zipmap(
+      [for node_pool in var.windows_node_pools : node_pool["name"]],
+      [for node_pool in var.windows_node_pools : []]
+    ),
     var.node_pools_oauth_scopes
   )
 }
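
# Usage sketch (illustrative, not part of the diff above): how a caller might
# pass both Linux and Windows pools once windows_node_pools is available.
# The registry source is the root module's published source; pool names,
# machine types and counts below are example values, not defaults.
module "gke" {
  source            = "terraform-google-modules/kubernetes-engine/google"
  project_id        = var.project_id
  name              = "windows-cluster"
  region            = "us-central1"
  network           = var.network
  subnetwork        = var.subnetwork
  ip_range_pods     = var.ip_range_pods
  ip_range_services = var.ip_range_services

  # At least one Linux node pool must exist before a Windows node pool can be
  # created, which is why the two pool lists are kept separate.
  node_pools = [
    {
      name         = "pool-01"
      machine_type = "n2-standard-2"
      min_count    = 1
      max_count    = 3
    },
  ]

  windows_node_pools = [
    {
      name         = "win-pool-01"
      machine_type = "n2-standard-2"
      min_count    = 1
      max_count    = 3
      image_type   = "WINDOWS_LTSC"
    },
  ]
}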