diff --git a/CHANGELOG.md b/CHANGELOG.md index 9200790618..4c645b5daf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,14 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +### Changed + +* Made `region` variable optional for zonal clusters [#247] + +### Added + +* Added [private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) and [beta private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) variants which allow node pools to be created before being destroyed. [#256] + ## [v5.0.0] - 2019-09-25 v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). @@ -196,6 +204,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 +[#256]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/256 [#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 [#228]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/228 [#238]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/238 diff --git a/README.md b/README.md index 02f020d54a..b269a1772f 100644 --- a/README.md +++ b/README.md @@ -165,7 +165,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. 
| string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 4e5fd74d55..296b2818df 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -219,6 +219,80 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +{% if update_variant %} +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +{% endif %} resource "google_container_node_pool" "pools" { {% if beta_cluster %} provider = google-beta @@ -226,7 +300,11 @@ resource "google_container_node_pool" "pools" { provider = google {% endif %} count = length(var.node_pools) + {% if update_variant %} + name = random_id.name.*.hex[count.index] + {% else %} name = var.node_pools[count.index]["name"] + {% endif %} project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -342,6 +420,9 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + {% if update_variant %} + create_before_destroy = true + {% endif %} } timeouts { diff --git a/autogen/main.tf b/autogen/main.tf index d9ad888d01..afbd7bf8c1 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -27,7 +27,7 @@ data "google_compute_zones" "available" { {% endif %} project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -38,6 +38,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? 
join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/autogen/networks.tf b/autogen/networks.tf index 88df19bc3b..cff6762fa3 100644 --- a/autogen/networks.tf +++ b/autogen/networks.tf @@ -35,6 +35,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { {% endif %} name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/autogen/variables.tf b/autogen/variables.tf index 17566d238f..af446afff8 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/examples/node_pool_update_variant/README.md b/examples/node_pool_update_variant/README.md new file mode 100644 index 0000000000..9215f091cb --- /dev/null +++ b/examples/node_pool_update_variant/README.md @@ -0,0 +1,45 @@ +# Node Pool Cluster + +This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, taints, and network tags. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | +| network | The VPC network to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| project\_id | | +| region | | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/node_pool_update_variant/data/shutdown-script.sh b/examples/node_pool_update_variant/data/shutdown-script.sh new file mode 100644 index 0000000000..f1ff19c353 --- /dev/null +++ b/examples/node_pool_update_variant/data/shutdown-script.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME" diff --git a/examples/node_pool_update_variant/main.tf b/examples/node_pool_update_variant/main.tf new file mode 100644 index 0000000000..c10e797511 --- /dev/null +++ b/examples/node_pool_update_variant/main.tf @@ -0,0 +1,119 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + cluster_type = "node-pool-update-variant" +} + +provider "google" { + version = "~> 2.12.0" + region = var.region +} + +data "google_compute_subnetwork" "subnetwork" { + name = var.subnetwork + project = var.project_id + region = var.region +} + +module "gke" { + source = "../../modules/private-cluster-update-variant" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = false + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] + + node_pools = [ + { + name = "pool-01" + min_count = 1 + max_count = 2 + service_account = var.compute_engine_service_account + auto_upgrade = true + }, + { + name = "pool-02" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 2 + disk_size_gb = 30 + disk_type = "pd-standard" + accelerator_count = 1 + accelerator_type = "nvidia-tesla-p4" + image_type = "COS" + auto_repair = false + service_account = var.compute_engine_service_account + }, + ] + + node_pools_oauth_scopes = { + all = [] + pool-01 = [] + pool-02 = [] + } + + node_pools_metadata = { + all = {} + pool-01 = { + shutdown-script = file("${path.module}/data/shutdown-script.sh") + } + pool-02 = {} + } + + node_pools_labels = { + all = { + all-pools-example = true + } + pool-01 = { + pool-01-example = true + } + pool-02 = {} + } + + node_pools_tags = { + all = [ + "all-node-example", + ] + pool-01 = [ + "pool-01-example", + ] + pool-02 = [] + } +} + +data "google_client_config" "default" { +} diff --git a/examples/node_pool_update_variant/outputs.tf b/examples/node_pool_update_variant/outputs.tf new file mode 100644 index 0000000000..0d972dcd88 --- /dev/null +++ b/examples/node_pool_update_variant/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." 
+ value = module.gke.service_account +} + diff --git a/examples/node_pool_update_variant/test_outputs.tf b/examples/node_pool_update_variant/test_outputs.tf new file mode 100644 index 0000000000..e64c40e477 --- /dev/null +++ b/examples/node_pool_update_variant/test_outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = var.ip_range_services +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/node_pool_update_variant/variables.tf b/examples/node_pool_update_variant/variables.tf new file mode 100644 index 0000000000..040c78d2c4 --- /dev/null +++ b/examples/node_pool_update_variant/variables.tf @@ -0,0 +1,54 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "zones" { + type = list(string) + description = "The zone to host the cluster in (required if is a zonal cluster)" +} + +variable "network" { + description = "The VPC network to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for pods" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} + diff --git a/examples/node_pool_update_variant_beta/README.md b/examples/node_pool_update_variant_beta/README.md new file mode 100644 index 0000000000..e95af795e9 --- /dev/null +++ b/examples/node_pool_update_variant_beta/README.md @@ -0,0 +1,46 @@ +# Node Pool Cluster + +This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, taints, and network tags. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| credentials\_path | The path to the GCP credentials JSON file | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | +| network | The VPC network to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| project\_id | | +| region | | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/node_pool_update_variant_beta/data/shutdown-script.sh b/examples/node_pool_update_variant_beta/data/shutdown-script.sh new file mode 100644 index 0000000000..f1ff19c353 --- /dev/null +++ b/examples/node_pool_update_variant_beta/data/shutdown-script.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME" diff --git a/examples/node_pool_update_variant_beta/main.tf b/examples/node_pool_update_variant_beta/main.tf new file mode 100644 index 0000000000..373fd59f30 --- /dev/null +++ b/examples/node_pool_update_variant_beta/main.tf @@ -0,0 +1,138 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + cluster_type = "node-pool-update-variant-beta" +} + +provider "google-beta" { + version = "~> 2.12.0" + credentials = file(var.credentials_path) + region = var.region +} + +data "google_compute_subnetwork" "subnetwork" { + name = var.subnetwork + project = var.project_id + region = var.region +} + +module "gke" { + source = "../../modules/beta-private-cluster-update-variant" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = false + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] + + node_pools = [ + { + name = "pool-01" + min_count = 1 + max_count = 2 + service_account = var.compute_engine_service_account + auto_upgrade = true + }, + { + name = "pool-02" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 2 + disk_size_gb = 30 + disk_type = "pd-standard" + accelerator_count = 1 + accelerator_type = "nvidia-tesla-p4" + image_type = "COS" + auto_repair = false + service_account = var.compute_engine_service_account + }, + ] + + node_pools_oauth_scopes = { + all = [] + pool-01 = [] + pool-02 = [] + } + + node_pools_metadata = { + all = {} + pool-01 = { + shutdown-script = file("${path.module}/data/shutdown-script.sh") + } + pool-02 = {} + } + + node_pools_labels = { + all = { + all-pools-example = true + } + pool-01 = { + pool-01-example = true + } + pool-02 = {} + } + + node_pools_taints = { + all = [ + { + key = "all-pools-example" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + pool-01 = [ + { + key = "pool-01-example" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + pool-02 = [] + } + + node_pools_tags = { + all = [ + "all-node-example", + ] + pool-01 = [ + "pool-01-example", + ] + pool-02 = [] + } +} + +data "google_client_config" "default" { +} diff --git a/examples/node_pool_update_variant_beta/outputs.tf b/examples/node_pool_update_variant_beta/outputs.tf new file mode 100644 index 0000000000..0d972dcd88 --- /dev/null +++ b/examples/node_pool_update_variant_beta/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The service account to default running nodes as if not overridden in `node_pools`." 
+ value = module.gke.service_account +} + diff --git a/examples/node_pool_update_variant_beta/test_outputs.tf b/examples/node_pool_update_variant_beta/test_outputs.tf new file mode 100644 index 0000000000..e64c40e477 --- /dev/null +++ b/examples/node_pool_update_variant_beta/test_outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = var.ip_range_services +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/node_pool_update_variant_beta/variables.tf b/examples/node_pool_update_variant_beta/variables.tf new file mode 100644 index 0000000000..9dc3873177 --- /dev/null +++ b/examples/node_pool_update_variant_beta/variables.tf @@ -0,0 +1,57 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "credentials_path" { + description = "The path to the GCP credentials JSON file" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "zones" { + type = list(string) + description = "The zone to host the cluster in (required if is a zonal cluster)" +} + +variable "network" { + description = "The VPC network to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for pods" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index c235e7ad65..b98b8bb69e 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -54,6 +54,17 @@ def template_options(self, base): 'private_cluster': True, 'beta_cluster': True, }), + Module("./modules/private-cluster-update-variant", { + 'module_path': '//modules/private-cluster-update-variant', + 'private_cluster': True, + 'update_variant': True, + }), + Module("./modules/beta-private-cluster-update-variant", { + 'module_path': '//modules/beta-private-cluster-update-variant', + 'private_cluster': True, + 'update_variant': True, + 'beta_cluster': True, + }), Module("./modules/beta-public-cluster", { 'module_path': '//modules/beta-public-cluster', 'private_cluster': False, diff --git a/helpers/migrate.py b/helpers/migrate.py index a22a7ce751..8f2d71cfce 100755 --- a/helpers/migrate.py +++ b/helpers/migrate.py @@ -18,7 +18,6 @@ import copy import subprocess import sys -import shutil import re MIGRATIONS = [ @@ -43,6 +42,7 @@ }, ] + class ModuleMigration: """ Migrate the resources from a flat project factory to match the new @@ -89,6 +89,7 @@ def targets(self): return to_move + class TerraformModule: """ A Terraform module with associated resources. @@ -171,7 +172,7 @@ def __init__(self, module, resource_type, name): self.module = module self.resource_type = resource_type - find_suffix = re.match('(^.+)\[(\d+)\]', name) + find_suffix = re.match(r'(^.+)\[(\d+)\]', name) if find_suffix: self.name = find_suffix.group(1) self.index = find_suffix.group(2) @@ -187,7 +188,7 @@ def path(self): if parts[0] == '': del parts[0] path = ".".join(parts) - if self.index is not -1 and self.plural: + if self.index != -1 and self.plural: path = "{0}[{1}]".format(path, self.index) return path @@ -198,6 +199,7 @@ def __repr__(self): self.resource_type, self.name) + def group_by_module(resources): """ Group a set of resources according to their containing module. @@ -241,7 +243,11 @@ def state_changes_for_module(module, statefile=None): for (old, new) in migration.moves(): wrapper = '"{0}"' - argv = ["terraform", "state", "mv", wrapper.format(old), wrapper.format(new)] + argv = ["terraform", + "state", + "mv", + wrapper.format(old), + wrapper.format(new)] commands.append(argv) return commands @@ -265,8 +271,8 @@ def migrate(statefile=None, dryrun=False): # Filter our list of Terraform modules down to anything that looks like a # zonal GKE module. 
We key this off the presence off of - # `google_container_cluster.zonal_primary` since that should almost always be - # unique to a GKE module. + # `google_container_cluster.zonal_primary` since that should almost always + # be unique to a GKE module. modules_to_migrate = [ module for module in modules if module.has_resource("google_container_cluster", "zonal_primary") @@ -289,6 +295,7 @@ def migrate(statefile=None, dryrun=False): argv = [arg.strip('"') for arg in argv] subprocess.run(argv, check=True, encoding='utf-8') + def main(argv): parser = argparser() args = parser.parse_args(argv[1:]) @@ -298,6 +305,7 @@ def main(argv): migrate(dryrun=args.dryrun) + def argparser(): parser = argparse.ArgumentParser(description='Migrate Terraform state') parser.add_argument('--dryrun', action='store_true', @@ -307,4 +315,4 @@ def argparser(): if __name__ == "__main__": - main(sys.argv) \ No newline at end of file + main(sys.argv) diff --git a/main.tf b/main.tf index b63d60f884..a9e1c15810 100644 --- a/main.tf +++ b/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md new file mode 100644 index 0000000000..0fc0068e96 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/README.md @@ -0,0 +1,286 @@ +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. This particular submodule creates a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters)Beta features are enabled in this submodule. +The resources/services/activations/deletions that this module will create/trigger are: +- Create a GKE cluster with the provided addons +- Create GKE Node Pool(s) with provided configuration and attach to cluster +- Replace the default kube-dns configmap if `stub_domains` are provided +- Activate network policy if `network_policy` is true +- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true + +Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. + +**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. + + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. 
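+
+## Node pool update variant
+
+In this update variant, each node pool is created with a short random suffix appended to its name. The suffix is regenerated whenever a parameter that forces node pool recreation changes (machine type, disk size or type, accelerator type or count, local SSD count, preemptibility, or service account), or when the pool's labels, metadata, OAuth scopes, or tags change. Because the node pools are managed with `create_before_destroy`, the replacement pool is created before the existing pool is destroyed, so node pools can be updated without deleting the old pool first.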
+ +## Usage +There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: + +```hcl +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster-update-variant" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.0.0.0/28" + istio = true + cloudrun = true + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + preemptible = false + initial_node_count = 80 + }, + ] + + node_pools_oauth_scopes = { + all = [] + + default-node-pool = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure + +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. 
Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. 
| string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. 
Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| intranode\_visibility\_enabled | Whether intra-node visibility is enabled | +| istio\_enabled | Whether Istio is enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in 
`node_pools`. | +| type | Cluster type (regional / zonal) | +| vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | +| zones | List of zones in which the cluster resides | + + + +## Requirements + +Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: + +1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. +2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). +3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. +4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. + +The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. + +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. + + +[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md +[terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-private-cluster-update-variant/auth.tf b/modules/beta-private-cluster-update-variant/auth.tf new file mode 100644 index 0000000000..c177eee5a7 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/auth.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" { + provider = google-beta +} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) +} diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf new file mode 100644 index 0000000000..cf1def945d --- /dev/null +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -0,0 +1,418 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create Container Cluster + *****************************************/ +resource "google_container_cluster" "primary" { + provider = google-beta + + name = var.name + description = var.description + project = var.project_id + resource_labels = var.cluster_resource_labels + + location = local.location + node_locations = local.node_locations + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.master_version + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + enable_intranode_visibility = var.enable_intranode_visibility + default_max_pods_per_node = var.default_max_pods_per_node + + vertical_pod_autoscaling { + enabled = var.enable_vertical_pod_autoscaling + } + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "resource_usage_export_config" { + for_each = var.resource_usage_export_dataset_id != "" ? 
[var.resource_usage_export_dataset_id] : [] + content { + enable_network_egress_metering = true + bigquery_destination { + dataset_id = resource_usage_export_config.value + } + } + } + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } + + master_auth { + username = var.basic_auth_username + password = var.basic_auth_password + + client_certificate_config { + issue_client_certificate = var.issue_client_certificate + } + } + + addons_config { + http_load_balancing { + disabled = ! var.http_load_balancing + } + + horizontal_pod_autoscaling { + disabled = ! var.horizontal_pod_autoscaling + } + + kubernetes_dashboard { + disabled = ! var.kubernetes_dashboard + } + + network_policy_config { + disabled = ! var.network_policy + } + + istio_config { + disabled = ! var.istio + } + + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } + } + + ip_allocation_policy { + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services + } + + maintenance_policy { + daily_maintenance_window { + start_time = var.maintenance_start_time + } + } + + lifecycle { + ignore_changes = [node_pool, initial_node_count] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + initial_node_count = var.initial_node_count + + node_config { + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } + } + } + + private_cluster_config { + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block + } + + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } + + dynamic "workload_identity_config" { + for_each = local.cluster_workload_identity_config + + content { + identity_namespace = workload_identity_config.value.identity_namespace + } + } + + dynamic "authenticator_groups_config" { + for_each = local.cluster_authenticator_security_group + content { + security_group = authenticator_groups_config.value.security_group + } + } +} + +/****************************************** + Create Container Cluster node pools + *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". 
schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +resource "google_container_node_pool" "pools" { + provider = google-beta + count = length(var.node_pools) + name = random_id.name.*.hex[count.index] + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "null_resource" "wait_for_cluster" { + + provisioner "local-exec" { + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + provisioner "local-exec" { + when = destroy + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/modules/beta-private-cluster-update-variant/dns.tf b/modules/beta-private-cluster-update-variant/dns.tf new file mode 100644 index 0000000000..b240a23e65 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/dns.tf @@ -0,0 +1,120 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Delete default kube-dns configmap + *****************************************/ +resource "null_resource" "delete_default_kube_dns_configmap" { + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + + provisioner "local-exec" { + command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + } + + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} + +/****************************************** + Create kube-dns confimap + *****************************************/ +resource "kubernetes_config_map" "kube-dns" { + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 + + metadata { + name = "kube-dns" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) + cluster_type = var.regional ? "regional" : "zonal" + // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous. + default_auto_upgrade = var.regional ? true : false + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + cluster_cloudrun_config = var.cloudrun ? [{ disabled = false }] : [] + + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] + + cluster_authenticator_security_group = var.authenticator_security_group == null ? [] : [{ + security_group = var.authenticator_security_group + }] + + cluster_sandbox_enabled = var.sandbox_enabled ? ["gvisor"] : [] + + + cluster_output_name = google_container_cluster.primary.name + cluster_output_location = google_container_cluster.primary.location + cluster_output_region = google_container_cluster.primary.region + cluster_output_regional_zones = google_container_cluster.primary.node_locations + cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] + cluster_output_zones = local.cluster_output_regional_zones + + cluster_output_endpoint = var.deploy_using_private_endpoint ? 
google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.endpoint + + cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) + cluster_output_master_version = google_container_cluster.primary.master_version + cluster_output_min_master_version = google_container_cluster.primary.min_master_version + cluster_output_logging_service = google_container_cluster.primary.logging_service + cluster_output_monitoring_service = google_container_cluster.primary.monitoring_service + cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled + cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled + cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled + cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled + + # BETA features + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false + cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false + + # /BETA features + + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + + cluster_master_auth_list_layer1 = local.cluster_output_master_auth + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] + # cluster locals + cluster_name = local.cluster_output_name + cluster_location = local.cluster_output_location + cluster_region = local.cluster_output_region + cluster_zones = sort(local.cluster_output_zones) + cluster_endpoint = local.cluster_output_endpoint + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_output_master_version + cluster_min_master_version = local.cluster_output_min_master_version + cluster_logging_service = local.cluster_output_logging_service + cluster_monitoring_service = local.cluster_output_monitoring_service + cluster_node_pools_names = local.cluster_output_node_pools_names + cluster_node_pools_versions = local.cluster_output_node_pools_versions + cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled + cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled + cluster_kubernetes_dashboard_enabled = ! 
local.cluster_output_kubernetes_dashboard_enabled + # BETA features + cluster_istio_enabled = ! local.cluster_output_istio_disabled + cluster_cloudrun_enabled = var.cloudrun + cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled + cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled + cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ + identity_namespace = var.identity_namespace + }] + # /BETA features +} + +/****************************************** + Get available container engine versions + *****************************************/ +data "google_container_engine_versions" "region" { + location = local.location + project = var.project_id +} + +data "google_container_engine_versions" "zone" { + // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error + // + // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. + // + location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] + project = var.project_id +} diff --git a/modules/beta-private-cluster-update-variant/masq.tf b/modules/beta-private-cluster-update-variant/masq.tf new file mode 100644 index 0000000000..b6e411fc42 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/masq.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create ip-masq-agent confimap + *****************************************/ +resource "kubernetes_config_map" "ip-masq-agent" { + count = var.configure_ip_masq ? 1 : 0 + + metadata { + name = "ip-masq-agent" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + config = <&2 echo "3 arguments expected. Exiting." 
+ exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh b/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh new file mode 100755 index 0000000000..e92300bcb5 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 3 ]; then + >&2 echo "Not all expected arguments set." + exit 1 +fi + +HOST=$1 +TOKEN=$2 +CA_CERTIFICATE=$3 + +shift 3 + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +# shellcheck disable=SC1117 +base64 --help | grep "\--decode" && B64_ARG="--decode" || B64_ARG="-d" +echo "${CA_CERTIFICATE}" | base64 ${B64_ARG} > "${TMPDIR}/ca_certificate" + +kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null +rm -f "${TMPDIR}/ca_certificate" +kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null +kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null +kubectl config use-context kubectl-wrapper 1>/dev/null +kubectl version 1>/dev/null + +"$@" diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh new file mode 100755 index 0000000000..6ff3253d58 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +PROJECT=$1 +CLUSTER_NAME=$2 +gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" +jq_query=".[] | select(.name==\"$CLUSTER_NAME\") | .status" + +echo "Waiting for cluster $2 in project $1 to reconcile..." + +current_status=$($gcloud_command | jq -r "$jq_query") + +while [[ "${current_status}" == "RECONCILING" ]]; do + printf "." + sleep 5 + current_status=$($gcloud_command | jq -r "$jq_query") +done + +echo "Cluster is ready!" diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf new file mode 100644 index 0000000000..9a869a830f --- /dev/null +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -0,0 +1,407 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +variable "project_id" { + type = string + description = "The project ID to host the cluster in (required)" +} + +variable "name" { + type = string + description = "The name of the cluster (required)" +} + +variable "description" { + type = string + description = "The description of the cluster" + default = "" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!)" + default = true +} + +variable "region" { + type = string + description = "The region to host the cluster in (required)" +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" + default = [] +} + +variable "network" { + type = string + description = "The VPC network to host the cluster in (required)" +} + +variable "network_project_id" { + type = string + description = "The project ID of the shared VPC's host (for shared vpc support)" + default = "" +} + +variable "subnetwork" { + type = string + description = "The subnetwork to host the cluster in (required)" +} + +variable "kubernetes_version" { + type = string + description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." + default = "latest" +} + +variable "node_version" { + type = string + description = "The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation." 
+ default = "" +} + +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + +variable "horizontal_pod_autoscaling" { + type = bool + description = "Enable horizontal pod autoscaling addon" + default = true +} + +variable "http_load_balancing" { + type = bool + description = "Enable httpload balancer addon" + default = true +} + +variable "kubernetes_dashboard" { + type = bool + description = "Enable kubernetes dashboard addon" + default = false +} + +variable "network_policy" { + type = bool + description = "Enable network policy addon" + default = false +} + +variable "network_policy_provider" { + type = string + description = "The network policy provider." + default = "CALICO" +} + +variable "maintenance_start_time" { + type = string + description = "Time window specified for daily maintenance operations in RFC3339 format" + default = "05:00" +} + +variable "ip_range_pods" { + type = string + description = "The _name_ of the secondary subnet ip range to use for pods" +} + +variable "ip_range_services" { + type = string + description = "The _name_ of the secondary subnet range to use for services" +} + +variable "initial_node_count" { + type = number + description = "The number of nodes to create in this cluster's default node pool." + default = 0 +} + +variable "remove_default_node_pool" { + type = bool + description = "Remove default node pool while setting up the cluster" + default = false +} + +variable "disable_legacy_metadata_endpoints" { + type = bool + description = "Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated." 
+ default = true +} + +variable "node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + + default = [ + { + name = "default-node-pool" + }, + ] +} + +variable "node_pools_labels" { + type = map(map(string)) + description = "Map of maps containing node labels by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_metadata" { + type = map(map(string)) + description = "Map of maps containing node metadata by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_taints" { + type = map(list(object({ key = string, value = string, effect = string }))) + description = "Map of lists containing node taints by node-pool name" + + default = { + all = [] + default-node-pool = [] + } +} + +variable "node_pools_tags" { + type = map(list(string)) + description = "Map of lists containing node network tags by node-pool name" + + default = { + all = [] + default-node-pool = [] + } +} + +variable "node_pools_oauth_scopes" { + type = map(list(string)) + description = "Map of lists containing node oauth scopes by node-pool name" + + default = { + all = ["https://www.googleapis.com/auth/cloud-platform"] + default-node-pool = [] + } +} + +variable "stub_domains" { + type = map(list(string)) + description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" + default = {} +} + +variable "upstream_nameservers" { + type = "list" + description = "If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf" + default = [] +} + +variable "non_masquerade_cidrs" { + type = list(string) + description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading." + default = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] +} + +variable "ip_masq_resync_interval" { + type = string + description = "The interval at which the agent attempts to sync its ConfigMap file from the disk." + default = "60s" +} + +variable "ip_masq_link_local" { + type = bool + description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)." + default = false +} + +variable "configure_ip_masq" { + description = "Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server." + default = false +} + +variable "logging_service" { + type = string + description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none" + default = "logging.googleapis.com" +} + +variable "monitoring_service" { + type = string + description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none" + default = "monitoring.googleapis.com" +} + +variable "create_service_account" { + type = bool + description = "Defines if service account specified to run nodes should be created." 
+ default = true +} + +variable "grant_registry_access" { + type = bool + description = "Grants created cluster-specific service account storage.objectViewer role." + default = false +} + +variable "service_account" { + type = string + description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." + default = "" +} + +variable "basic_auth_username" { + type = string + description = "The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration." + default = "" +} + +variable "basic_auth_password" { + type = string + description = "The password to be used with Basic Authentication." + default = "" +} + +variable "issue_client_certificate" { + type = bool + description = "Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive!" + default = false +} + +variable "cluster_ipv4_cidr" { + default = "" + description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." +} + +variable "cluster_resource_labels" { + type = map(string) + description = "The GCE resource labels (a map of key/value pairs) to be applied to the cluster" + default = {} +} + + +variable "deploy_using_private_endpoint" { + type = bool + description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." + default = false +} + +variable "enable_private_endpoint" { + type = bool + description = "(Beta) Whether the master's internal IP address is used as the cluster endpoint" + default = false +} + +variable "enable_private_nodes" { + type = bool + description = "(Beta) Whether nodes have internal IP addresses only" + default = false +} + +variable "master_ipv4_cidr_block" { + type = string + description = "(Beta) The IP range in CIDR notation to use for the hosted master network" + default = "10.0.0.0/28" +} + +variable "istio" { + description = "(Beta) Enable Istio addon" + default = false +} + +variable "default_max_pods_per_node" { + description = "The maximum number of pods to schedule per node" + default = 110 +} + +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." + type = list(object({ state = string, key_name = string })) + default = [{ + state = "DECRYPTED" + key_name = "" + }] +} + +variable "cloudrun" { + description = "(Beta) Enable CloudRun addon" + default = false +} + +variable "enable_binary_authorization" { + description = "Enable BinAuthZ Admission controller" + default = false +} + +variable "pod_security_policy_config" { + description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ + "enabled" = false + }] +} + +variable "resource_usage_export_dataset_id" { + type = string + description = "The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic." 
+ default = "" +} + +variable "node_metadata" { + description = "Specifies how node metadata is exposed to the workload running on the node" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false +} + +variable "enable_intranode_visibility" { + type = bool + description = "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network" + default = false +} + +variable "enable_vertical_pod_autoscaling" { + type = bool + description = "Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it" + default = false +} + +variable "identity_namespace" { + description = "Workload Identity namespace" + type = string + default = "" +} + +variable "authenticator_security_group" { + type = string + description = "The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com" + default = null +} + diff --git a/modules/beta-private-cluster-update-variant/versions.tf b/modules/beta-private-cluster-update-variant/versions.tf new file mode 100644 index 0000000000..832ec1df39 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index c2920e4b28..425ef67fa1 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -188,7 +188,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. 
| string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index fc38644871..63bf31ac78 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-private-cluster/networks.tf b/modules/beta-private-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-private-cluster/networks.tf +++ b/modules/beta-private-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 2c53d06b90..ee2d5b5556 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index f013439240..73cf4ea4c5 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -179,7 +179,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. 
| string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index dea58d4de5..f0477f9498 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/beta-public-cluster/networks.tf b/modules/beta-public-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-public-cluster/networks.tf +++ b/modules/beta-public-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 07771a27a9..90008d54bd 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md new file mode 100644 index 0000000000..e817361124 --- /dev/null +++ b/modules/private-cluster-update-variant/README.md @@ -0,0 +1,265 @@ +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. This particular submodule creates a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters) +The resources/services/activations/deletions that this module will create/trigger are: +- Create a GKE cluster with the provided addons +- Create GKE Node Pool(s) with provided configuration and attach to cluster +- Replace the default kube-dns configmap if `stub_domains` are provided +- Activate network policy if `network_policy` is true +- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true + +Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. + +**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. + + +## Compatibility + +This module is meant for use with Terraform 0.12. 
If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + +## Usage +There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: + +```hcl +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-update-variant" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.0.0.0/28" + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + preemptible = false + initial_node_count = 80 + }, + ] + + node_pools_oauth_scopes = { + all = [] + + default-node-pool = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure + +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| basic\_auth\_password | The password to be used with Basic Authentication. 
| string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. 
| string | `""` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + + + +## Requirements + +Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: + +1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. +2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). +3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. +4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. + +The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. 
+ +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP][terraform-provider-google] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. + + +[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster-update-variant/auth.tf b/modules/private-cluster-update-variant/auth.tf new file mode 100644 index 0000000000..48e7cc6a5f --- /dev/null +++ b/modules/private-cluster-update-variant/auth.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" { + provider = google +} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) +} diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf new file mode 100644 index 0000000000..e8db91a77a --- /dev/null +++ b/modules/private-cluster-update-variant/cluster.tf @@ -0,0 +1,322 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create Container Cluster + *****************************************/ +resource "google_container_cluster" "primary" { + provider = google + + name = var.name + description = var.description + project = var.project_id + resource_labels = var.cluster_resource_labels + + location = local.location + node_locations = local.node_locations + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.master_version + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } + + master_auth { + username = var.basic_auth_username + password = var.basic_auth_password + + client_certificate_config { + issue_client_certificate = var.issue_client_certificate + } + } + + addons_config { + http_load_balancing { + disabled = ! var.http_load_balancing + } + + horizontal_pod_autoscaling { + disabled = ! var.horizontal_pod_autoscaling + } + + kubernetes_dashboard { + disabled = ! var.kubernetes_dashboard + } + + network_policy_config { + disabled = ! 
var.network_policy + } + } + + ip_allocation_policy { + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services + } + + maintenance_policy { + daily_maintenance_window { + start_time = var.maintenance_start_time + } + } + + lifecycle { + ignore_changes = [node_pool, initial_node_count] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + initial_node_count = var.initial_node_count + + node_config { + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + } + } + + private_cluster_config { + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block + } + + remove_default_node_pool = var.remove_default_node_pool +} + +/****************************************** + Create Container Cluster node pools + *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +resource "google_container_node_pool" "pools" { + provider = google + count = length(var.node_pools) + name = random_id.name.*.hex[count.index] + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "null_resource" "wait_for_cluster" { + + provisioner "local-exec" { + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + provisioner "local-exec" { + when = destroy + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/modules/private-cluster-update-variant/dns.tf b/modules/private-cluster-update-variant/dns.tf new file mode 100644 index 0000000000..b240a23e65 --- /dev/null +++ b/modules/private-cluster-update-variant/dns.tf @@ -0,0 +1,120 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Delete default kube-dns configmap + *****************************************/ +resource "null_resource" "delete_default_kube_dns_configmap" { + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + + provisioner "local-exec" { + command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + } + + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} + +/****************************************** + Create kube-dns confimap + *****************************************/ +resource "kubernetes_config_map" "kube-dns" { + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 + + metadata { + name = "kube-dns" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) + cluster_type = var.regional ? "regional" : "zonal" + // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous. + default_auto_upgrade = var.regional ? true : false + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + + cluster_output_name = google_container_cluster.primary.name + cluster_output_location = google_container_cluster.primary.location + cluster_output_region = google_container_cluster.primary.region + cluster_output_regional_zones = google_container_cluster.primary.node_locations + cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] + cluster_output_zones = local.cluster_output_regional_zones + + cluster_output_endpoint = var.deploy_using_private_endpoint ? 
google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.endpoint + + cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) + cluster_output_master_version = google_container_cluster.primary.master_version + cluster_output_min_master_version = google_container_cluster.primary.min_master_version + cluster_output_logging_service = google_container_cluster.primary.logging_service + cluster_output_monitoring_service = google_container_cluster.primary.monitoring_service + cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled + cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled + cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled + cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled + + + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + + cluster_master_auth_list_layer1 = local.cluster_output_master_auth + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] + # cluster locals + cluster_name = local.cluster_output_name + cluster_location = local.cluster_output_location + cluster_region = local.cluster_output_region + cluster_zones = sort(local.cluster_output_zones) + cluster_endpoint = local.cluster_output_endpoint + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_output_master_version + cluster_min_master_version = local.cluster_output_min_master_version + cluster_logging_service = local.cluster_output_logging_service + cluster_monitoring_service = local.cluster_output_monitoring_service + cluster_node_pools_names = local.cluster_output_node_pools_names + cluster_node_pools_versions = local.cluster_output_node_pools_versions + cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled + cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled + cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled +} + +/****************************************** + Get available container engine versions + *****************************************/ +data "google_container_engine_versions" "region" { + location = local.location + project = var.project_id +} + +data "google_container_engine_versions" "zone" { + // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error + // + // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. + // + location = local.zone_count == 0 ? 
data.google_compute_zones.available.names[0] : var.zones[0] + project = var.project_id +} diff --git a/modules/private-cluster-update-variant/masq.tf b/modules/private-cluster-update-variant/masq.tf new file mode 100644 index 0000000000..b6e411fc42 --- /dev/null +++ b/modules/private-cluster-update-variant/masq.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create ip-masq-agent confimap + *****************************************/ +resource "kubernetes_config_map" "ip-masq-agent" { + count = var.configure_ip_masq ? 1 : 0 + + metadata { + name = "ip-masq-agent" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + config = <&2 echo "3 arguments expected. Exiting." + exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh b/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh new file mode 100755 index 0000000000..e92300bcb5 --- /dev/null +++ b/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 3 ]; then + >&2 echo "Not all expected arguments set." 
+ exit 1 +fi + +HOST=$1 +TOKEN=$2 +CA_CERTIFICATE=$3 + +shift 3 + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +# shellcheck disable=SC1117 +base64 --help | grep "\--decode" && B64_ARG="--decode" || B64_ARG="-d" +echo "${CA_CERTIFICATE}" | base64 ${B64_ARG} > "${TMPDIR}/ca_certificate" + +kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null +rm -f "${TMPDIR}/ca_certificate" +kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null +kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null +kubectl config use-context kubectl-wrapper 1>/dev/null +kubectl version 1>/dev/null + +"$@" diff --git a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh new file mode 100755 index 0000000000..6ff3253d58 --- /dev/null +++ b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +PROJECT=$1 +CLUSTER_NAME=$2 +gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" +jq_query=".[] | select(.name==\"$CLUSTER_NAME\") | .status" + +echo "Waiting for cluster $2 in project $1 to reconcile..." + +current_status=$($gcloud_command | jq -r "$jq_query") + +while [[ "${current_status}" == "RECONCILING" ]]; do + printf "." + sleep 5 + current_status=$($gcloud_command | jq -r "$jq_query") +done + +echo "Cluster is ready!" diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf new file mode 100644 index 0000000000..8008e08975 --- /dev/null +++ b/modules/private-cluster-update-variant/variables.tf @@ -0,0 +1,318 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +variable "project_id" { + type = string + description = "The project ID to host the cluster in (required)" +} + +variable "name" { + type = string + description = "The name of the cluster (required)" +} + +variable "description" { + type = string + description = "The description of the cluster" + default = "" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!)" + default = true +} + +variable "region" { + type = string + description = "The region to host the cluster in (required)" +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" + default = [] +} + +variable "network" { + type = string + description = "The VPC network to host the cluster in (required)" +} + +variable "network_project_id" { + type = string + description = "The project ID of the shared VPC's host (for shared vpc support)" + default = "" +} + +variable "subnetwork" { + type = string + description = "The subnetwork to host the cluster in (required)" +} + +variable "kubernetes_version" { + type = string + description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." + default = "latest" +} + +variable "node_version" { + type = string + description = "The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation." + default = "" +} + +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + +variable "horizontal_pod_autoscaling" { + type = bool + description = "Enable horizontal pod autoscaling addon" + default = true +} + +variable "http_load_balancing" { + type = bool + description = "Enable httpload balancer addon" + default = true +} + +variable "kubernetes_dashboard" { + type = bool + description = "Enable kubernetes dashboard addon" + default = false +} + +variable "network_policy" { + type = bool + description = "Enable network policy addon" + default = false +} + +variable "network_policy_provider" { + type = string + description = "The network policy provider." + default = "CALICO" +} + +variable "maintenance_start_time" { + type = string + description = "Time window specified for daily maintenance operations in RFC3339 format" + default = "05:00" +} + +variable "ip_range_pods" { + type = string + description = "The _name_ of the secondary subnet ip range to use for pods" +} + +variable "ip_range_services" { + type = string + description = "The _name_ of the secondary subnet range to use for services" +} + +variable "initial_node_count" { + type = number + description = "The number of nodes to create in this cluster's default node pool." 
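+  # Note: this count applies only to the cluster's default node pool, which is
+  # deleted right after cluster creation when remove_default_node_pool is true.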
+ default = 0 +} + +variable "remove_default_node_pool" { + type = bool + description = "Remove default node pool while setting up the cluster" + default = false +} + +variable "disable_legacy_metadata_endpoints" { + type = bool + description = "Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated." + default = true +} + +variable "node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + + default = [ + { + name = "default-node-pool" + }, + ] +} + +variable "node_pools_labels" { + type = map(map(string)) + description = "Map of maps containing node labels by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_metadata" { + type = map(map(string)) + description = "Map of maps containing node metadata by node-pool name" + + default = { + all = {} + default-node-pool = {} + } +} + +variable "node_pools_tags" { + type = map(list(string)) + description = "Map of lists containing node network tags by node-pool name" + + default = { + all = [] + default-node-pool = [] + } +} + +variable "node_pools_oauth_scopes" { + type = map(list(string)) + description = "Map of lists containing node oauth scopes by node-pool name" + + default = { + all = ["https://www.googleapis.com/auth/cloud-platform"] + default-node-pool = [] + } +} + +variable "stub_domains" { + type = map(list(string)) + description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server" + default = {} +} + +variable "upstream_nameservers" { + type = "list" + description = "If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf" + default = [] +} + +variable "non_masquerade_cidrs" { + type = list(string) + description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading." + default = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] +} + +variable "ip_masq_resync_interval" { + type = string + description = "The interval at which the agent attempts to sync its ConfigMap file from the disk." + default = "60s" +} + +variable "ip_masq_link_local" { + type = bool + description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)." + default = false +} + +variable "configure_ip_masq" { + description = "Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server." + default = false +} + +variable "logging_service" { + type = string + description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none" + default = "logging.googleapis.com" +} + +variable "monitoring_service" { + type = string + description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none" + default = "monitoring.googleapis.com" +} + +variable "create_service_account" { + type = bool + description = "Defines if service account specified to run nodes should be created." 
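+  # Set this to false to have nodes run as the existing account passed in
+  # var.service_account instead of a newly created, cluster-specific one.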
+ default = true +} + +variable "grant_registry_access" { + type = bool + description = "Grants created cluster-specific service account storage.objectViewer role." + default = false +} + +variable "service_account" { + type = string + description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." + default = "" +} + +variable "basic_auth_username" { + type = string + description = "The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration." + default = "" +} + +variable "basic_auth_password" { + type = string + description = "The password to be used with Basic Authentication." + default = "" +} + +variable "issue_client_certificate" { + type = bool + description = "Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive!" + default = false +} + +variable "cluster_ipv4_cidr" { + default = "" + description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." +} + +variable "cluster_resource_labels" { + type = map(string) + description = "The GCE resource labels (a map of key/value pairs) to be applied to the cluster" + default = {} +} + + +variable "deploy_using_private_endpoint" { + type = bool + description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." + default = false +} + +variable "enable_private_endpoint" { + type = bool + description = "(Beta) Whether the master's internal IP address is used as the cluster endpoint" + default = false +} + +variable "enable_private_nodes" { + type = bool + description = "(Beta) Whether nodes have internal IP addresses only" + default = false +} + +variable "master_ipv4_cidr_block" { + type = string + description = "(Beta) The IP range in CIDR notation to use for the hosted master network" + default = "10.0.0.0/28" +} diff --git a/modules/private-cluster-update-variant/versions.tf b/modules/private-cluster-update-variant/versions.tf new file mode 100644 index 0000000000..832ec1df39 --- /dev/null +++ b/modules/private-cluster-update-variant/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index f7f8fef179..a9e780d24b 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -174,7 +174,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. 
Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | | registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index bfe746401c..2bd1c40d14 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? 
coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version diff --git a/modules/private-cluster/networks.tf b/modules/private-cluster/networks.tf index a382073dc0..aae034eee5 100644 --- a/modules/private-cluster/networks.tf +++ b/modules/private-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 00d7779e83..a425c13d9a 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { diff --git a/networks.tf b/networks.tf index a382073dc0..aae034eee5 100644 --- a/networks.tf +++ b/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 6ff3253d58..37f0176ec7 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,10 @@ set -e +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${GOOGLE_APPLICATION_CREDENTIALS} +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/test/fixtures/node_pool_update_variant/example.tf b/test/fixtures/node_pool_update_variant/example.tf new file mode 100644 index 0000000000..c3a21df3d5 --- /dev/null +++ b/test/fixtures/node_pool_update_variant/example.tf @@ -0,0 +1,29 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module "example" { + source = "../../../examples/node_pool_update_variant" + + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + zones = slice(var.zones, 0, 1) + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + compute_engine_service_account = var.compute_engine_service_account +} diff --git a/test/fixtures/node_pool_update_variant/network.tf b/test/fixtures/node_pool_update_variant/network.tf new file mode 100644 index 0000000000..e1292eae3b --- /dev/null +++ b/test/fixtures/node_pool_update_variant/network.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +provider "google" { + project = var.project_id +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} + diff --git a/test/fixtures/node_pool_update_variant/outputs.tf b/test/fixtures/node_pool_update_variant/outputs.tf new file mode 120000 index 0000000000..726bdc722f --- /dev/null +++ b/test/fixtures/node_pool_update_variant/outputs.tf @@ -0,0 +1 @@ +../shared/outputs.tf \ No newline at end of file diff --git a/test/fixtures/node_pool_update_variant/variables.tf b/test/fixtures/node_pool_update_variant/variables.tf new file mode 120000 index 0000000000..c113c00a3d --- /dev/null +++ b/test/fixtures/node_pool_update_variant/variables.tf @@ -0,0 +1 @@ +../shared/variables.tf \ No newline at end of file diff --git a/variables.tf b/variables.tf index da9c744646..b498bf5b6e 100644 --- a/variables.tf +++ b/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" {
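
Usage sketch (illustrative, not part of the diff): with `region` now optional, a zonal
cluster can omit it and let the module derive the region from the first zone. All values
below are placeholders; note that the private-cluster-update-variant submodule added in
this change still declares `region` without a default, so it must be set there explicitly.

module "gke_zonal" {
  source = "terraform-google-modules/kubernetes-engine/google"

  project_id = "my-project-id"          # placeholder
  name       = "example-zonal-cluster"  # placeholder

  regional = false
  zones    = ["us-central1-a"]          # region is inferred from the zone

  network           = "my-vpc"
  subnetwork        = "my-subnet"
  ip_range_pods     = "pods-range-name"
  ip_range_services = "services-range-name"

  remove_default_node_pool = true

  node_pools = [
    {
      name         = "pool-01"
      machine_type = "n1-standard-2"
      min_count    = 1
      max_count    = 3
      auto_upgrade = true
    },
  ]
}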