diff --git a/.dockerignore b/.dockerignore index 5e7ebff64e..ab75c635fe 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,4 +7,5 @@ test/fixtures/*/.terraform test/fixtures/*/terraform.tfstate.d examples/.kitchen examples/*/.terraform -examples/*/terraform.tfstate.d \ No newline at end of file +examples/*/terraform.tfstate.d + diff --git a/.kitchen.yml b/.kitchen.yml index 425e3d84b1..2e62d513b1 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -29,13 +29,15 @@ platforms: - name: local suites: - - name: "deploy_service" - driver: - root_module_directory: test/fixtures/deploy_service - verifier: - systems: - - name: deploy_service - backend: local +# Disabled due to issue #274 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/274) +# - name: "deploy_service" +# driver: +# root_module_directory: test/fixtures/deploy_service +# verifier: +# systems: +# - name: deploy_service +# backend: local - name: "disable_client_cert" driver: root_module_directory: test/fixtures/disable_client_cert @@ -43,13 +45,15 @@ suites: systems: - name: disable_client_cert backend: local - - name: "node_pool" - driver: - root_module_directory: test/fixtures/node_pool - verifier: - systems: - - name: node_pool - backend: local +# Disabled due to issue #274 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/274) +# - name: "node_pool" +# driver: +# root_module_directory: test/fixtures/node_pool +# verifier: +# systems: +# - name: node_pool +# backend: local - name: "shared_vpc" driver: root_module_directory: test/fixtures/shared_vpc @@ -64,6 +68,23 @@ suites: systems: - name: simple_regional backend: local + - name: "simple_regional_with_networking" + driver: + root_module_directory: test/fixtures/simple_regional_with_networking + verifier: + systems: + - name: simple_regional_with_networking + backend: local + controls: + - gcloud + - name: subnet + backend: local + controls: + - subnet + - name: network + backend: gcp + controls: + - network - name: "simple_regional_private" driver: root_module_directory: test/fixtures/simple_regional_private @@ -98,12 +119,14 @@ suites: systems: - name: stub_domains backend: local - - name: stub_domains_private - driver: - root_module_directory: test/fixtures/stub_domains_private - systems: - - name: stub_domains_private - backend: local +# Disabled due to issue #264 +# (https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/264) +# - name: stub_domains_private +# driver: +# root_module_directory: test/fixtures/stub_domains_private +# systems: +# - name: stub_domains_private +# backend: local - name: "upstream_nameservers" driver: root_module_directory: test/fixtures/upstream_nameservers @@ -138,3 +161,10 @@ suites: backend: gcp controls: - gcp + - name: "sandbox_enabled" + driver: + root_module_directory: test/fixtures/sandbox_enabled + verifier: + systems: + - name: sandbox_enabled + backend: local diff --git a/.ruby-version b/.ruby-version deleted file mode 100644 index aedc15bb0c..0000000000 --- a/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -2.5.3 diff --git a/CHANGELOG.md b/CHANGELOG.md index ffaf0f3b55..cf6ef22df2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,12 +8,40 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] -### Changed +## [v5.1.1] - 2019-10-25 + +### Fixed -* All Beta functionality removed from non-beta clusters, some properties like node_pool taints available only in beta cluster now [#228] +* Fixed bug 
with setting up sandboxing on nodes. [#286] + +## [v5.1.0] - 2019-10-24 ### Added +* Added ability to skip local-exec provisioners. [#258] +* Added [private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/private-cluster-update-variant) and [beta private](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/tree/master/modules/beta-private-cluster-update-variant) variants which allow node pools to be created before being destroyed. [#256] +* Add a parameter `registry_project_id` to allow connecting to registries in other projects. [#273] + +### Changed + +* Made `region` variable optional for zonal clusters. [#247] +* Made default metadata, labels, and tags optional. [#282] + +### Fixed + +* Authenticate gcloud in wait-for-cluster.sh using value of `GOOGLE_APPLICATION_CREDENTIALS`. [#284] [#285] + +## [v5.0.0] - 2019-09-25 +v5.0.0 is a backwards-incompatible release. Please see the [upgrading guide](./docs/upgrading_to_v5.0.md). + +The v5.0.0 module requires using the [2.12 version](https://github.com/terraform-providers/terraform-provider-google/blob/master/CHANGELOG.md#2120-august-01-2019) of the Google provider. + +### Changed + +* **Breaking**: Enabled metadata-concealment by default [#248] +* All beta functionality removed from non-beta clusters, moved `node_pool_taints` to beta modules [#228] + +### Added * Added support for resource usage export config [#238] * Added `sandbox_enabled` variable to use GKE Sandbox [#241] * Added `grant_registry_access` variable to grant Container Registry access to created SA [#236] @@ -22,6 +50,10 @@ Extending the adopted spec, each change should have a link to its corresponding * Support for Google Groups based RBAC beta feature [#217] * Support for disabling node pool autoscaling by setting `autoscaling` to `false` within the node pool variable. [#250] +### Fixed + +* Fixed issue with passing a dynamically created Service Account to the module. [#27] + ## [v4.1.0] 2019-07-24 ### Added @@ -39,6 +71,8 @@ Extending the adopted spec, each change should have a link to its corresponding * Supported version of Terraform is 0.12. [#177] ## [v3.0.0] - 2019-07-08 +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. ### Added @@ -79,6 +113,8 @@ Extending the adopted spec, each change should have a link to its corresponding 2.3. [#148] ## [v2.0.0] - 2019-04-12 +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. ### Added @@ -110,6 +146,10 @@ Extending the adopted spec, each change should have a link to its corresponding * Fix empty zone list. [#132] ## [v1.0.0] - 2019-03-25 +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. 
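+
+A minimal sketch of that override (assuming the module exposes a
+`disable_legacy_metadata_endpoints` input; check the variables reference for
+your version):
+
+```hcl
+module "gke" {
+  source = "terraform-google-modules/kubernetes-engine/google"
+  # ...other required inputs elided...
+
+  # Re-enables the legacy /0.1/ and /v1beta1/ metadata paths; leave the
+  # default (true) unless workloads genuinely depend on them.
+  disable_legacy_metadata_endpoints = false
+}
+```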
+ +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + ### Added * Allow creation of service accounts. [#80] * Add support for private clusters via submodule. [#69] @@ -164,7 +204,10 @@ Extending the adopted spec, each change should have a link to its corresponding * Initial release of module. -[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...HEAD +[Unreleased]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.1...HEAD +[v5.1.1]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.1.0...v5.1.1 +[v5.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v5.0.0...v5.1.0 +[v5.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.1.0...v5.0.0 [v4.1.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v4.0.0...v4.1.0 [v4.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v3.0.0...v4.0.0 [v3.0.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v2.1.0...v3.0.0 @@ -178,6 +221,15 @@ Extending the adopted spec, each change should have a link to its corresponding [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#286]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/286 +[#285]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/285 +[#284]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/284 +[#282]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/282 +[#273]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/273 +[#258]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/258 +[#256]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/256 +[#248]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/248 +[#247]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/247 [#228]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/228 [#238]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/238 [#241]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/241 @@ -185,6 +237,7 @@ Extending the adopted spec, each change should have a link to its corresponding [#236]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/236 [#217]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/217 [#234]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/234 +[#27]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/27 [#216]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/216 [#214]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/214 [#210]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/210 @@ -239,3 +292,9 @@ 
Extending the adopted spec, each change should have a link to its corresponding
 [#15]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/15
 [#10]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/10
 [#9]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/9
+
+[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md
+[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md
+[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google
+[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0
+[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..cd4943578a
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,122 @@
+# Contributing
+
+This document provides guidelines for contributing to the module.
+
+## Dependencies
+
+The following dependencies must be installed on the development system:
+
+- [Docker Engine][docker-engine]
+- [Google Cloud SDK][google-cloud-sdk]
+- [make]
+
+## Generating Documentation for Inputs and Outputs
+
+The Inputs and Outputs tables in the READMEs of the root module,
+submodules, and example modules are automatically generated based on
+the `variables` and `outputs` of the respective modules. These tables
+must be refreshed if the module interfaces are changed.
+
+## Templating
+
+To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (e.g. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory.
+
+The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate.
+
+Note: The correct sequence to update the repo using autogen functionality is to run
+`make docker_generate && make docker_generate_docs`. This will create the various Terraform files, and then
+generate the Terraform documentation using `terraform-docs`.
+
+### Autogeneration of documentation from .tf files
+To generate new Inputs and Outputs tables, run
+```
+make docker_generate_docs
+```
+
+## Integration Testing
+
+Integration tests are used to verify the behaviour of the root module,
+submodules, and example modules. Additions, changes, and fixes should
+be accompanied with tests.
+
+The integration tests are run using [Kitchen][kitchen],
+[Kitchen-Terraform][kitchen-terraform], and [InSpec][inspec]. These
+tools are packaged within a Docker image for convenience.
+
+The general strategy for these tests is to verify the behaviour of the
+[example modules](./examples/), thus ensuring that the root module,
+submodules, and example modules are all functionally correct.
+
+Six test-kitchen instances are defined:
+
+- `deploy-service`
+- `node-pool`
+- `shared-vpc`
+- `simple-regional`
+- `simple-zonal`
+- `stub-domains`
+
+The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory.
+
+### Test Environment
+The easiest way to test the module is in an isolated test project. The setup for such a project is defined in the [test/setup](./test/setup/) directory.
+
+To use this setup, you need a service account with Project Creator access on a folder.
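+A quick sketch of granting that access with `gcloud` (the folder ID and
+service account email below are placeholders to substitute):
+
+```
+# Grant Project Creator on the target folder to the test service account.
+gcloud resource-manager folders add-iam-policy-binding FOLDER_ID \
+  --member="serviceAccount:SA_NAME@PROJECT_ID.iam.gserviceaccount.com" \
+  --role="roles/resourcemanager.projectCreator"
+```
+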
+Export the Service Account credentials to your environment like so:
+
+```
+export SERVICE_ACCOUNT_JSON=$(< credentials.json)
+```
+
+You will also need to set a few environment variables:
+```
+export TF_VAR_org_id="your_org_id"
+export TF_VAR_folder_id="your_folder_id"
+export TF_VAR_billing_account="your_billing_account_id"
+```
+
+With these settings in place, you can prepare a test project using Docker:
+```
+make docker_test_prepare
+```
+
+### Noninteractive Execution
+
+Run `make docker_test_integration` to test all of the example modules
+noninteractively, using the prepared test project.
+
+### Interactive Execution
+
+1. Run `make docker_run` to start the testing Docker container in
+   interactive mode.
+
+1. Run `kitchen_do create <configuration>` to initialize the working
+   directory for an example module.
+
+1. Run `kitchen_do converge <configuration>` to apply the example module.
+
+1. Run `kitchen_do verify <configuration>` to test the example module.
+
+1. Run `kitchen_do destroy <configuration>` to destroy the example module
+   state.
+
+## Linting and Formatting
+
+Many of the files in the repository can be linted or formatted to
+maintain a standard of quality.
+
+### Execution
+
+Run `make docker_test_lint`.
+
+[docker-engine]: https://www.docker.com/products/docker-engine
+[flake8]: http://flake8.pycqa.org/en/latest/
+[gofmt]: https://golang.org/cmd/gofmt/
+[google-cloud-sdk]: https://cloud.google.com/sdk/install
+[hadolint]: https://github.com/hadolint/hadolint
+[inspec]: https://inspec.io/
+[kitchen-terraform]: https://github.com/newcontext-oss/kitchen-terraform
+[kitchen]: https://kitchen.ci/
+[make]: https://en.wikipedia.org/wiki/Make_(software)
+[shellcheck]: https://www.shellcheck.net/
+[terraform-docs]: https://github.com/segmentio/terraform-docs
+[terraform]: https://terraform.io/
diff --git a/Makefile b/Makefile
index 21d7a2764f..736cad34ce 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2018 Google LLC
+# Copyright 2019 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,172 +12,85 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template).
+# Please make sure to contribute relevant changes upstream!
+ # Make will use bash instead of sh SHELL := /usr/bin/env bash -# Docker build config variables -CREDENTIALS_PATH ?= /cft/workdir/credentials.json -DOCKER_ORG := gcr.io/cloud-foundation-cicd -DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.3.0 -DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} - -# All is the first target in the file so it will get picked up when you just run 'make' on its own -.PHONY: all -all: check generate_docs - -.PHONY: check -check: check_shell check_python check_golang check_terraform check_base_files test_check_headers check_headers check_trailing_whitespace check_generate check_generate_docs - -# The .PHONY directive tells make that this isn't a real target and so -# the presence of a file named 'check_shell' won't cause this target to stop -# working -.PHONY: check_shell -check_shell: - @source test/make.sh && check_shell - -.PHONY: check_python -check_python: - @source test/make.sh && check_python - -.PHONY: check_golang -check_golang: - @source test/make.sh && golang - -.PHONY: check_terraform -check_terraform: - @source test/make.sh && check_terraform - -.PHONY: check_base_files -check_base_files: - @source test/make.sh && basefiles +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.6 +DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools +REGISTRY_URL := gcr.io/cloud-foundation-cicd -.PHONY: check_shebangs -check_shebangs: - @source test/make.sh && check_bash - -.PHONY: check_trailing_whitespace -check_trailing_whitespace: - @source test/make.sh && check_trailing_whitespace - -.PHONY: test_check_headers -test_check_headers: - @echo "Testing the validity of the header check" - @python test/test_verify_boilerplate.py - -.PHONY: check_headers -check_headers: - @echo "Checking file headers" - @python test/verify_boilerplate.py - -.PHONY: check_generate -check_generate: ## Check that `make generate` does not generate a diff - @source test/make.sh && check_generate - -.PHONY: check_generate_docs -check_generate_docs: ## Check that `make generate_docs` does not generate a diff - @source test/make.sh && check_generate_docs - -# Integration tests -.PHONY: test_integration -test_integration: - test/ci_integration.sh - -.PHONY: generate_docs -generate_docs: - @source test/make.sh && generate_docs - -.PHONY: generate -generate: - @source test/make.sh && generate - -.PHONY: dev -dev: generate generate_docs - @echo "Updated files" - -# Versioning -.PHONY: version -version: - @source helpers/version-repo.sh - -# Run docker +# Enter docker container for local development .PHONY: docker_run docker_run: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && exec /bin/bash" + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -.PHONY: docker_create -docker_create: docker_build_kitchen_terraform +# Execute prepare tests within the docker container +.PHONY: docker_test_prepare +docker_test_prepare: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e 
GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen create" - -.PHONY: docker_converge -docker_converge: + -e TF_VAR_org_id \ + -e TF_VAR_folder_id \ + -e TF_VAR_billing_account \ + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/execute_with_credentials.sh prepare_environment + +# Clean up test environment within the docker container +.PHONY: docker_test_cleanup +docker_test_cleanup: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen converge && kitchen converge" - -.PHONY: docker_verify -docker_verify: + -e TF_VAR_org_id \ + -e TF_VAR_folder_id \ + -e TF_VAR_billing_account \ + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/execute_with_credentials.sh cleanup_environment + +# Execute integration tests within the docker container +.PHONY: docker_test_integration +docker_test_integration: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen verify" + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/test_integration.sh -.PHONY: docker_destroy -docker_destroy: +# Execute lint tests within the docker container +.PHONY: docker_test_lint +docker_test_lint: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ - -e SERVICE_ACCOUNT_JSON \ - -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ - -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen destroy" + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /usr/local/bin/test_lint.sh -.PHONY: test_integration_docker -test_integration_docker: +# Generate documentation +.PHONY: docker_generate_docs +docker_generate_docs: docker run --rm -it \ - -e COMPUTE_ENGINE_SERVICE_ACCOUNT \ - -e PROJECT_ID \ - -e REGION \ - -e ZONES \ - -e SERVICE_ACCOUNT_JSON \ - -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - /bin/bash -c "test/ci_integration.sh" + -v "$(CURDIR)":/workspace \ + $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \ + /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs' + +# Generate files from autogen +.PHONY: docker_generate +docker_generate: + docker run --rm -it \ + -v "$(CURDIR)":/workspace \ + 
$(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \
+		/bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate'
+
+# Alias for backwards compatibility
+.PHONY: generate_docs
+generate_docs: docker_generate_docs
+
+.PHONY: generate
+generate: docker_generate
diff --git a/README.md b/README.md
index 923d3f7a09..15f6aff13b 100644
--- a/README.md
+++ b/README.md
@@ -165,10 +165,12 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o
 | node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no |
 | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no |
 | project\_id | The project ID to host the cluster in (required) | string | n/a | yes |
-| region | The region to host the cluster in (required) | string | n/a | yes |
+| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no |
 | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no |
+| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no |
 | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no |
 | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no |
+| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no |
 | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no |
 | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes |
 | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no |
@@ -228,6 +230,9 @@ following project roles:
 - roles/iam.serviceAccountUser
 - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`)
+Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project:
+- roles/resourcemanager.projectIamAdmin
+
 ### Enable APIs
 
 In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created:
@@ -248,141 +253,6 @@ The project has the following folders and files:
 
 - /README.MD: This file.
 - /modules: Private and beta sub modules.
 
-## Templating
-
-To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory.
- -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. 
You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: docs/upgrading_to_v3.0.md diff --git a/autogen/README.md b/autogen/README.md index 620aa422c5..846d339911 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -28,7 +28,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google{% if private_cluster %}//modules/private-cluster{% endif %}" + source = "terraform-google-modules/kubernetes-engine/google{{ module_path }}" project_id = "" name = "gke-test-1" region = "us-central1" @@ -173,6 +173,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: @@ -193,141 +196,6 @@ The project has the following folders and files: - /README.MD: This file. 
- /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. 
- -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. 
Can be found in homebrew {% if private_cluster %} [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 9ae4414a18..23d9a6b5ff 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -45,6 +45,16 @@ resource "google_container_cluster" "primary" { } } +{% if beta_cluster %} + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } +{% endif %} + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version @@ -143,7 +153,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -167,14 +177,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } {% endif %} } } @@ -219,6 +221,80 @@ resource "google_container_cluster" "primary" { /****************************************** Create Container Cluster node pools *****************************************/ +{% if update_variant %} +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +{% endif %} resource "google_container_node_pool" "pools" { {% if beta_cluster %} provider = google-beta @@ -226,7 +302,11 @@ resource "google_container_node_pool" "pools" { provider = google {% endif %} count = length(var.node_pools) + {% if update_variant %} + name = random_id.name.*.hex[count.index] + {% else %} name = var.node_pools[count.index]["name"] + {% endif %} project = var.project_id location = local.location cluster = google_container_cluster.primary.name @@ -263,22 +343,14 @@ resource "google_container_node_pool" "pools" { image_type = 
lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -299,8 +371,8 @@ resource "google_container_node_pool" "pools" { } {% endif %} tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -337,11 +409,22 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } {% endif %} } lifecycle { ignore_changes = [initial_node_count] + {% if update_variant %} + create_before_destroy = true + {% endif %} } timeouts { @@ -352,6 +435,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/autogen/dns.tf b/autogen/dns.tf index d9d4a35395..20c3b25ee9 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/autogen/main.tf b/autogen/main.tf index c4b070f9a3..30347b9b15 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -27,7 +27,7 @@ data "google_compute_zones" "available" { {% endif %} project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -38,6 +38,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? 
join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version @@ -47,6 +48,10 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal +{% if beta_cluster %} + release_channel = var.release_channel != null ? [{ channel : var.release_channel }] : [] +{% endif %} + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 @@ -104,10 +109,10 @@ locals { {% if beta_cluster %} # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features {% endif %} @@ -137,7 +142,7 @@ locals { cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled {% if beta_cluster %} # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! 
local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/autogen/networks.tf b/autogen/networks.tf index 88df19bc3b..cff6762fa3 100644 --- a/autogen/networks.tf +++ b/autogen/networks.tf @@ -35,6 +35,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { {% endif %} name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/autogen/outputs.tf b/autogen/outputs.tf index ff8eab1bef..704569d00e 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -150,4 +150,8 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} {% endif %} diff --git a/autogen/sa.tf b/autogen/sa.tf index 62b31f457a..eaebeb2a22 100644 --- a/autogen/sa.tf +++ b/autogen/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/autogen/scripts/wait-for-cluster.sh b/autogen/scripts/wait-for-cluster.sh index 6ff3253d58..b7019eace1 100755 --- a/autogen/scripts/wait-for-cluster.sh +++ b/autogen/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,11 @@ set -e +# shellcheck disable=SC2034 +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/autogen/variables.tf b/autogen/variables.tf index 16c2955ced..3f2a12f3a9 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -269,6 +270,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." @@ -304,6 +311,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. 
It breaks `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} {% if private_cluster %} variable "deploy_using_private_endpoint" { @@ -376,7 +388,8 @@ variable "resource_usage_export_dataset_id" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "sandbox_enabled" { @@ -409,4 +422,9 @@ variable "authenticator_security_group" { default = null } +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." + default = null +} {% endif %} diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml new file mode 100644 index 0000000000..5a52a5889b --- /dev/null +++ b/build/int.cloudbuild.yaml @@ -0,0 +1,269 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +timeout: 12600s +steps: +- id: prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + env: + - 'TF_VAR_org_id=$_ORG_ID' + - 'TF_VAR_folder_id=$_FOLDER_ID' + - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' +- id: create disable-client-cert-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create disable-client-cert-local'] +- id: converge disable-client-cert-local + waitFor: + - create disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge disable-client-cert-local'] +- id: verify disable-client-cert-local + waitFor: + - converge disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify disable-client-cert-local'] +- id: destroy disable-client-cert-local + waitFor: + - verify disable-client-cert-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy disable-client-cert-local'] +- id: create shared-vpc-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create shared-vpc-local'] +- id: converge shared-vpc-local + waitFor: + - create shared-vpc-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge shared-vpc-local'] +- id: verify shared-vpc-local + waitFor: + - converge shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify shared-vpc-local'] +- id: destroy shared-vpc-local + waitFor: + - verify shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy shared-vpc-local'] +- id: create simple-regional-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-regional-local'] +- id: converge simple-regional-local + waitFor: + - create simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-local'] +- id: verify simple-regional-local + waitFor: + - converge simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-local'] +- id: destroy simple-regional-local + waitFor: + - verify simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-local'] +- id: create simple-regional-private-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-regional-private-local'] +- id: converge simple-regional-private-local + waitFor: + - create simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-private-local'] +- id: verify simple-regional-private-local + waitFor: + - converge simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-private-local'] +- id: destroy simple-regional-private-local + waitFor: + - verify simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-private-local'] +- id: create simple-regional-with-networking-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source 
/usr/local/bin/task_helper_functions.sh && kitchen_do create simple-regional-with-networking-local'] +- id: converge simple-regional-with-networking-local + waitFor: + - create simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-networking-local'] +- id: verify simple-regional-with-networking-local + waitFor: + - converge simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-networking-local'] +- id: destroy simple-regional-with-networking-local + waitFor: + - verify simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-networking-local'] +- id: create simple-zonal-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-zonal-local'] +- id: converge simple-zonal-local + waitFor: + - create simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local'] +- id: verify simple-zonal-local + waitFor: + - converge simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local'] +- id: destroy simple-zonal-local + waitFor: + - verify simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local'] +- id: create simple-zonal-private-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create simple-zonal-private-local'] +- id: converge simple-zonal-private-local + waitFor: + - create simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-private-local'] +- id: verify simple-zonal-private-local + waitFor: + - converge simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-private-local'] +- id: destroy simple-zonal-private-local + waitFor: + - verify simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source 
/usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-private-local'] +- id: create stub-domains-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create stub-domains-local'] +- id: converge stub-domains-local + waitFor: + - create stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-local'] +- id: verify stub-domains-local + waitFor: + - converge stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-local'] +- id: destroy stub-domains-local + waitFor: + - verify stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-local'] +- id: create upstream-nameservers-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create upstream-nameservers-local'] +- id: converge upstream-nameservers-local + waitFor: + - create upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge upstream-nameservers-local'] +- id: verify upstream-nameservers-local + waitFor: + - converge upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify upstream-nameservers-local'] +- id: destroy upstream-nameservers-local + waitFor: + - verify upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy upstream-nameservers-local'] +- id: create stub-domains-upstream-nameservers-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create stub-domains-upstream-nameservers-local'] +- id: converge stub-domains-upstream-nameservers-local + waitFor: + - create stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-upstream-nameservers-local'] +- id: verify stub-domains-upstream-nameservers-local + waitFor: + - converge stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do 
verify stub-domains-upstream-nameservers-local'] +- id: destroy stub-domains-upstream-nameservers-local + waitFor: + - verify stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-upstream-nameservers-local'] +- id: create workload-metadata-config-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create workload-metadata-config-local'] +- id: converge workload-metadata-config-local + waitFor: + - create workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-metadata-config-local'] +- id: verify workload-metadata-config-local + waitFor: + - converge workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-metadata-config-local'] +- id: destroy workload-metadata-config-local + waitFor: + - verify workload-metadata-config-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local'] +- id: create sandbox-enabled-local + waitFor: + - prepare + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create sandbox-enabled-local'] +- id: converge sandbox-enabled-local + waitFor: + - create sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge sandbox-enabled-local'] +- id: verify sandbox-enabled-local + waitFor: + - converge sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify sandbox-enabled-local'] +- id: destroy sandbox-enabled-local + waitFor: + - verify sandbox-enabled-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy sandbox-enabled-local'] +tags: +- 'ci' +- 'integration' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.6' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml new file mode 100644 index 0000000000..7ba0827bdb --- /dev/null +++ b/build/lint.cloudbuild.yaml @@ -0,0 +1,27 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +steps: +- id: 'lint-generation' + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && check_generate'] +- id: 'lint-tests' + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/usr/local/bin/test_lint.sh'] +tags: +- 'ci' +- 'lint' +substitutions: + _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.4.6' diff --git a/cluster.tf b/cluster.tf index 0f94ddb0e5..7e5f53ee47 100644 --- a/cluster.tf +++ b/cluster.tf @@ -41,6 +41,7 @@ resource "google_container_cluster" "primary" { } } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version @@ -99,7 +100,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -161,22 +162,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -184,8 +177,8 @@ resource "google_container_node_pool" "pools" { }, ) tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -227,6 +220,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 
0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/dns.tf b/dns.tf index b240a23e65..8a581ff68e 100644 --- a/dns.tf +++ b/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/docs/upgrading_to_v5.0.md b/docs/upgrading_to_v5.0.md new file mode 100644 index 0000000000..39abfbe8a5 --- /dev/null +++ b/docs/upgrading_to_v5.0.md @@ -0,0 +1,82 @@ +# Upgrading to v5.0 + +The v5.0 release of *kubernetes-engine* is a backwards incompatible +release. + +## Migration Instructions + +### Node pool taints +Previously, node pool taints could be set on all module versions. + +Now, to set taints you must use the beta version of the module. + +```diff + module "kubernetes_engine_private_cluster" { +- source = "terraform-google-modules/kubernetes-engine/google" ++ source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster" +- version = "~> 4.0" ++ version = "~> 5.0" + } +``` + +### Service Account creation + +Previously, if you explicitly specified a Service Account using the `service_account` variable on the module this was sufficient to force that Service Account to be used. + +Now, an additional `create_service_account` has been added with a default value of `true`. If you would like to use an explicitly created Service Account from outside the module, you will need to set `create_service_account` to `false` (in addition to passing in the Service Account email). + +No action is needed if you use the module's default service account. + +```diff + module "kubernetes_engine_private_cluster" { + source = "terraform-google-modules/kubernetes-engine/google" +- version = "~> 4.0" ++ version = "~> 5.0" + + service_account = "project-service-account@my-project.iam.gserviceaccount.com" ++ create_service_account = false + # ... + } +``` + +### Resource simplification +The `google_container_cluster` and `google_container_node_pool` resources previously were different between regional and zonal clusters. They have now been collapsed into a single resource using the `location` variable. + +If you are using regional clusters, no migration is needed. If you are using zonal clusters, a state migration is needed. You can use a [script](../helpers/migrate.py) we provided to determine the required state changes: + +1. Download the script + + ```sh + curl -O https://raw.githubusercontent.com/terraform-google-modules/terraform-google-kubernetes-engine/v5.0.0/helpers/migrate.py + chmod +x migrate.py + ``` + +2. 
Run the script in dryrun mode to confirm the expected changes: + + ```sh + $ ./migrate.py --dryrun + + ---- Migrating the following modules: + -- module.gke-cluster-dev.module.gke + ---- Commands to run: + terraform state mv -state terraform.tfstate "module.gke-cluster-dev.module.gke.google_container_cluster.zonal_primary[0]" "module.gke-cluster-dev.module.gke.google_container_cluster.primary[0]" + terraform state mv "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]" + ``` + +3. Execute the migration script + + ```sh + $ ./migrate.py + + ---- Migrating the following modules: + -- module.gke-cluster-dev.module.gke + ---- Commands to run: + Move "module.gke-cluster-dev.module.gke.google_container_cluster.zonal_primary[0]" to "module.gke-cluster-dev.module.gke.google_container_cluster.primary[0]" + Successfully moved 1 object(s). + Move "module.gke-cluster-dev.module.gke.google_container_node_pool.zonal_pools[0]" to "module.gke-cluster-dev.module.gke.google_container_node_pool.pools[0]" + Successfully moved 1 object(s). + Move "module.gke-cluster-dev.module.gke.null_resource.wait_for_zonal_cluster" to "module.gke-cluster-dev.module.gke.null_resource.wait_for_cluster" + Successfully moved 1 object(s). + ``` + +4. Run `terraform plan` to confirm no changes are expected. diff --git a/examples/deploy_service/README.md b/examples/deploy_service/README.md index 5dcb7ca7a7..e13981c450 100644 --- a/examples/deploy_service/README.md +++ b/examples/deploy_service/README.md @@ -37,7 +37,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/deploy_service/outputs.tf b/examples/deploy_service/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/deploy_service/outputs.tf +++ b/examples/deploy_service/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/disable_client_cert/README.md b/examples/disable_client_cert/README.md index 14dd6545c0..2f531b9906 100644 --- a/examples/disable_client_cert/README.md +++ b/examples/disable_client_cert/README.md @@ -36,7 +36,7 @@ This example illustrates how to create a simple cluster and disable deprecated s | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/disable_client_cert/outputs.tf b/examples/disable_client_cert/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/disable_client_cert/outputs.tf +++ b/examples/disable_client_cert/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
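The `cluster.tf` hunk earlier in this diff changes how the automatic node-pool labels, metadata, and tags are applied: a `default_values` key in `node_pools_labels`/`node_pools_metadata` (a map of booleans) and in `node_pools_tags` (a two-element boolean list) now controls whether the built-in `cluster_name` and `node_pool` entries are emitted, and each defaults to enabled. A minimal consumer-side sketch, assuming a single hypothetical pool named `pool-01` (module version and required cluster arguments are placeholders):

```hcl
module "gke" {
  source  = "terraform-google-modules/kubernetes-engine/google"
  version = "~> 5.1"
  # ... required cluster arguments elided ...

  node_pools_labels = {
    # Opt out of the automatic cluster_name / node_pool labels;
    # both booleans default to true when default_values is omitted.
    default_values = {
      cluster_name = false
      node_pool    = false
    }
    all     = {}
    pool-01 = {}
  }

  node_pools_tags = {
    # Element 0 governs the "gke-<cluster>" tag, element 1 the
    # "gke-<cluster>-<pool>" tag; the default is [true, true].
    default_values = [false, true]
    all            = []
    pool-01        = []
  }
}
```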
value = module.gke.service_account } diff --git a/examples/node_pool/README.md b/examples/node_pool/README.md index 9215f091cb..237b3f0b6f 100644 --- a/examples/node_pool/README.md +++ b/examples/node_pool/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a cluster with multiple custom node-pool | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 6662bb84ac..c7a7f852ae 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -19,7 +19,7 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" region = var.region } diff --git a/examples/node_pool/outputs.tf b/examples/node_pool/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/node_pool/outputs.tf +++ b/examples/node_pool/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/node_pool_update_variant/README.md b/examples/node_pool_update_variant/README.md new file mode 100644 index 0000000000..9215f091cb --- /dev/null +++ b/examples/node_pool_update_variant/README.md @@ -0,0 +1,45 @@ +# Node Pool Cluster + +This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, taints, and network tags. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for services | string | n/a | yes | +| network | The VPC network to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zones to host the cluster in (required if it is a zonal cluster) | list(string) | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| project\_id | | +| region | | +| service\_account | The default service account used for running nodes. 
| +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/test/boilerplate/boilerplate.sh.txt b/examples/node_pool_update_variant/data/shutdown-script.sh similarity index 80% rename from test/boilerplate/boilerplate.sh.txt rename to examples/node_pool_update_variant/data/shutdown-script.sh index 2e94f3e551..f1ff19c353 100644 --- a/test/boilerplate/boilerplate.sh.txt +++ b/examples/node_pool_update_variant/data/shutdown-script.sh @@ -1,3 +1,5 @@ +#!/bin/bash -e + # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -11,3 +13,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME" diff --git a/examples/node_pool_update_variant/main.tf b/examples/node_pool_update_variant/main.tf new file mode 100644 index 0000000000..c10e797511 --- /dev/null +++ b/examples/node_pool_update_variant/main.tf @@ -0,0 +1,119 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + cluster_type = "node-pool-update-variant" +} + +provider "google" { + version = "~> 2.12.0" + region = var.region +} + +data "google_compute_subnetwork" "subnetwork" { + name = var.subnetwork + project = var.project_id + region = var.region +} + +module "gke" { + source = "../../modules/private-cluster-update-variant" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = false + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] + + node_pools = [ + { + name = "pool-01" + min_count = 1 + max_count = 2 + service_account = var.compute_engine_service_account + auto_upgrade = true + }, + { + name = "pool-02" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 2 + disk_size_gb = 30 + disk_type = "pd-standard" + accelerator_count = 1 + accelerator_type = "nvidia-tesla-p4" + image_type = "COS" + auto_repair = false + service_account = var.compute_engine_service_account + }, + ] + + node_pools_oauth_scopes = { + all = [] + pool-01 = [] + pool-02 = [] + } + + node_pools_metadata = { + all = {} + pool-01 = { + shutdown-script = file("${path.module}/data/shutdown-script.sh") + } + pool-02 = {} + } + + node_pools_labels = { + all = { + all-pools-example = true + } + pool-01 = { + pool-01-example = true + } + pool-02 = {} + } + + node_pools_tags = { + all = [ + "all-node-example", + ] + pool-01 = [ + "pool-01-example", + ] + pool-02 = [] + } +} + +data "google_client_config" "default" { +} diff --git a/examples/node_pool_update_variant/outputs.tf b/examples/node_pool_update_variant/outputs.tf new file mode 100644 index 0000000000..0d972dcd88 --- /dev/null +++ b/examples/node_pool_update_variant/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The default service account used for running nodes." 
+ value = module.gke.service_account +} + diff --git a/examples/node_pool_update_variant/test_outputs.tf b/examples/node_pool_update_variant/test_outputs.tf new file mode 100644 index 0000000000..e64c40e477 --- /dev/null +++ b/examples/node_pool_update_variant/test_outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = var.ip_range_services +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/node_pool_update_variant/variables.tf b/examples/node_pool_update_variant/variables.tf new file mode 100644 index 0000000000..040c78d2c4 --- /dev/null +++ b/examples/node_pool_update_variant/variables.tf @@ -0,0 +1,54 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
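For reference, a hypothetical `terraform.tfvars` matching the variables this example declares below (every value is a placeholder, not a tested configuration):

```hcl
# terraform.tfvars -- placeholder values only
project_id                     = "my-project"
region                         = "us-central1"
zones                          = ["us-central1-a"]
network                        = "my-vpc"
subnetwork                     = "my-subnet"
ip_range_pods                  = "my-pods-range"
ip_range_services              = "my-services-range"
compute_engine_service_account = "gke-nodes@my-project.iam.gserviceaccount.com"
```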
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (required if it is a zonal cluster)" +} + +variable "network" { + description = "The VPC network to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for services" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} + diff --git a/examples/node_pool_update_variant_beta/README.md b/examples/node_pool_update_variant_beta/README.md new file mode 100644 index 0000000000..e95af795e9 --- /dev/null +++ b/examples/node_pool_update_variant_beta/README.md @@ -0,0 +1,46 @@ +# Node Pool Cluster + +This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, taints, and network tags. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | +| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | +| credentials\_path | The path to the GCP credentials JSON file | string | n/a | yes | +| ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The secondary ip range to use for services | string | n/a | yes | +| network | The VPC network to host the cluster in | string | n/a | yes | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | n/a | yes | +| subnetwork | The subnetwork to host the cluster in | string | n/a | yes | +| zones | The zones to host the cluster in (required if it is a zonal cluster) | list(string) | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | | +| client\_token | | +| cluster\_name | Cluster name | +| ip\_range\_pods | The secondary IP range used for pods | +| ip\_range\_services | The secondary IP range used for services | +| kubernetes\_endpoint | | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| project\_id | | +| region | | +| service\_account | The default service account used for running nodes. 
| +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/Gemfile b/examples/node_pool_update_variant_beta/data/shutdown-script.sh similarity index 79% rename from Gemfile rename to examples/node_pool_update_variant_beta/data/shutdown-script.sh index a54d14ec29..f1ff19c353 100644 --- a/Gemfile +++ b/examples/node_pool_update_variant_beta/data/shutdown-script.sh @@ -1,3 +1,5 @@ +#!/bin/bash -e + # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,10 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -ruby "~> 2.5" - -source 'https://rubygems.org/' do - gem "kitchen-terraform", "~> 4.9" - gem "kubeclient", "~> 4.0" - gem "rest-client", "~> 2.0" -end +kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME" diff --git a/examples/node_pool_update_variant_beta/main.tf b/examples/node_pool_update_variant_beta/main.tf new file mode 100644 index 0000000000..37b595f793 --- /dev/null +++ b/examples/node_pool_update_variant_beta/main.tf @@ -0,0 +1,138 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + cluster_type = "node-pool-update-variant-beta" +} + +provider "google-beta" { + version = "~> 2.18.0" + credentials = file(var.credentials_path) + region = var.region +} + +data "google_compute_subnetwork" "subnetwork" { + name = var.subnetwork + project = var.project_id + region = var.region +} + +module "gke" { + source = "../../modules/beta-private-cluster-update-variant" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = false + region = var.region + zones = var.zones + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] + + node_pools = [ + { + name = "pool-01" + min_count = 1 + max_count = 2 + service_account = var.compute_engine_service_account + auto_upgrade = true + }, + { + name = "pool-02" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 2 + disk_size_gb = 30 + disk_type = "pd-standard" + accelerator_count = 1 + accelerator_type = "nvidia-tesla-p4" + image_type = "COS" + auto_repair = false + service_account = var.compute_engine_service_account + }, + ] + + node_pools_oauth_scopes = { + all = [] + pool-01 = [] + pool-02 = [] + } + + node_pools_metadata = { + all = {} + pool-01 = { + shutdown-script = file("${path.module}/data/shutdown-script.sh") + } + pool-02 = {} + } + + node_pools_labels = { + all = { + all-pools-example = true + } + pool-01 = { + pool-01-example = true + } + pool-02 = {} + } + + node_pools_taints = { + all = [ + { + key = "all-pools-example" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + pool-01 = [ + { + key = "pool-01-example" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + pool-02 = [] + } + + node_pools_tags = { + all = [ + "all-node-example", + ] + pool-01 = [ + "pool-01-example", + ] + pool-02 = [] + } +} + +data "google_client_config" "default" { +} diff --git a/examples/node_pool_update_variant_beta/outputs.tf b/examples/node_pool_update_variant_beta/outputs.tf new file mode 100644 index 0000000000..0d972dcd88 --- /dev/null +++ b/examples/node_pool_update_variant_beta/outputs.tf @@ -0,0 +1,35 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The default service account used for running nodes." 
+ value = module.gke.service_account +} + diff --git a/examples/node_pool_update_variant_beta/test_outputs.tf b/examples/node_pool_update_variant_beta/test_outputs.tf new file mode 100644 index 0000000000..e64c40e477 --- /dev/null +++ b/examples/node_pool_update_variant_beta/test_outputs.tf @@ -0,0 +1,63 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods +} + +output "ip_range_services" { + description = "The secondary IP range used for services" + value = var.ip_range_services +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/node_pool_update_variant_beta/variables.tf b/examples/node_pool_update_variant_beta/variables.tf new file mode 100644 index 0000000000..9dc3873177 --- /dev/null +++ b/examples/node_pool_update_variant_beta/variables.tf @@ -0,0 +1,57 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
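The beta variable set also gains `release_channel` (see the autogen `variables.tf` change earlier in this diff, which accepts `UNSPECIFIED`, `RAPID`, `REGULAR`, and `STABLE`, defaulting to `null`). A sketch of opting a cluster into a channel, assuming the beta update-variant module exposes the same variable as the other beta modules:

```hcl
module "gke" {
  source = "../../modules/beta-private-cluster-update-variant"
  # ... required cluster arguments elided ...

  # null keeps the provider default (UNSPECIFIED); any other value
  # must be one of RAPID, REGULAR, or STABLE.
  release_channel = "REGULAR"
}
```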
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "credentials_path" { + description = "The path to the GCP credentials JSON file" +} + +variable "cluster_name_suffix" { + description = "A suffix to append to the default cluster name" + default = "" +} + +variable "region" { + description = "The region to host the cluster in" +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (required if it is a zonal cluster)" +} + +variable "network" { + description = "The VPC network to host the cluster in" +} + +variable "subnetwork" { + description = "The subnetwork to host the cluster in" +} + +variable "ip_range_pods" { + description = "The secondary ip range to use for pods" +} + +variable "ip_range_services" { + description = "The secondary ip range to use for services" +} + +variable "compute_engine_service_account" { + description = "Service account to associate to the nodes in the cluster" +} diff --git a/examples/shared_vpc/README.md b/examples/shared_vpc/README.md index 3b0f5a6157..964a346349 100644 --- a/examples/shared_vpc/README.md +++ b/examples/shared_vpc/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple cluster where the host network i | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/shared_vpc/outputs.tf b/examples/shared_vpc/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/shared_vpc/outputs.tf +++ b/examples/shared_vpc/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional/README.md b/examples/simple_regional/README.md index fb209e47b5..4950b21e9a 100644 --- a/examples/simple_regional/README.md +++ b/examples/simple_regional/README.md @@ -14,6 +14,7 @@ This example illustrates how to create a simple cluster. | network | The VPC network to host the cluster in | string | n/a | yes | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | +| skip\_provisioners | Flag to skip local-exec provisioners | bool | `"false"` | no | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | ## Outputs @@ -31,7 +32,7 @@ This example illustrates how to create a simple cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. 
| | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional/main.tf b/examples/simple_regional/main.tf index 4662435fbd..353ae91906 100644 --- a/examples/simple_regional/main.tf +++ b/examples/simple_regional/main.tf @@ -35,6 +35,7 @@ module "gke" { ip_range_services = var.ip_range_services create_service_account = false service_account = var.compute_engine_service_account + skip_provisioners = var.skip_provisioners } data "google_client_config" "default" { diff --git a/examples/simple_regional/outputs.tf b/examples/simple_regional/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional/outputs.tf +++ b/examples/simple_regional/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional/variables.tf b/examples/simple_regional/variables.tf index 6121eab9ea..e7405d9e21 100644 --- a/examples/simple_regional/variables.tf +++ b/examples/simple_regional/variables.tf @@ -47,3 +47,8 @@ variable "compute_engine_service_account" { description = "Service account to associate to the nodes in the cluster" } +variable "skip_provisioners" { + type = bool + description = "Flag to skip local-exec provisioners" + default = false +} diff --git a/examples/simple_regional_beta/README.md b/examples/simple_regional_beta/README.md index bd676115b9..72bb221d9f 100644 --- a/examples/simple_regional_beta/README.md +++ b/examples/simple_regional_beta/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple cluster with beta features. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -11,13 +10,16 @@ This example illustrates how to create a simple cluster with beta features. | cloudrun | Boolean to enable / disable CloudRun | string | `"true"` | no | | cluster\_name\_suffix | A suffix to append to the default cluster name | string | `""` | no | | compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | string | n/a | yes | -| credentials\_path | The path to the GCP credentials JSON file | string | n/a | yes | | ip\_range\_pods | The secondary ip range to use for pods | string | n/a | yes | | ip\_range\_services | The secondary ip range to use for pods | string | n/a | yes | | istio | Boolean to enable / disable Istio | string | `"true"` | no | | network | The VPC network to host the cluster in | string | n/a | yes | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | ## Outputs @@ -27,7 +29,6 @@ This example illustrates how to create a simple cluster with beta features. 
| ca\_certificate | | | client\_token | | | cluster\_name | Cluster name | -| credentials\_path | | | ip\_range\_pods | The secondary IP range used for pods | | ip\_range\_services | The secondary IP range used for services | | kubernetes\_endpoint | | @@ -36,11 +37,11 @@ This example illustrates how to create a simple cluster with beta features. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/simple_regional_beta/main.tf b/examples/simple_regional_beta/main.tf index fc95090ede..0863cc51de 100644 --- a/examples/simple_regional_beta/main.tf +++ b/examples/simple_regional_beta/main.tf @@ -19,27 +19,29 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" - credentials = file(var.credentials_path) - region = var.region + version = "~> 2.18.0" + region = var.region } module "gke" { - source = "../../modules/beta-public-cluster/" - project_id = var.project_id - name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" - regional = true - region = var.region - network = var.network - subnetwork = var.subnetwork - ip_range_pods = var.ip_range_pods - ip_range_services = var.ip_range_services - create_service_account = false - service_account = var.compute_engine_service_account - istio = var.istio - cloudrun = var.cloudrun + source = "../../modules/beta-public-cluster/" + project_id = var.project_id + name = "${local.cluster_type}-cluster${var.cluster_name_suffix}" + regional = true + region = var.region + network = var.network + subnetwork = var.subnetwork + ip_range_pods = var.ip_range_pods + ip_range_services = var.ip_range_services + create_service_account = false + service_account = var.compute_engine_service_account + istio = var.istio + cloudrun = var.cloudrun + node_metadata = var.node_metadata + sandbox_enabled = var.sandbox_enabled + remove_default_node_pool = var.remove_default_node_pool + node_pools = var.node_pools } data "google_client_config" "default" { } - diff --git a/examples/simple_regional_beta/outputs.tf b/examples/simple_regional_beta/outputs.tf index ad152e186c..0d770aa809 100644 --- a/examples/simple_regional_beta/outputs.tf +++ b/examples/simple_regional_beta/outputs.tf @@ -29,6 +29,6 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
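Tying together the new beta inputs this example surfaces (`node_metadata`, `sandbox_enabled`, `remove_default_node_pool`, `node_pools`), here is a minimal sketch of a sandbox-enabled call that follows the constraint stated in the `sandbox_enabled` description (a `COS_CONTAINERD` image and node version `1.12.7-gke.17` or later); the pool name is hypothetical and required cluster arguments are elided:

```hcl
module "gke" {
  source = "../../modules/beta-public-cluster/"
  # ... required cluster arguments elided ...

  node_metadata   = "SECURE"
  sandbox_enabled = true

  # Per the sandbox_enabled description, GKE Sandbox needs
  # containerd-based nodes on node_version 1.12.7-gke.17 or later.
  node_version             = "1.12.7-gke.17"
  remove_default_node_pool = true
  node_pools = [
    {
      name       = "sandbox-pool"
      image_type = "COS_CONTAINERD"
    },
  ]
}
```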
value = module.gke.service_account } diff --git a/examples/simple_regional_beta/test_outputs.tf b/examples/simple_regional_beta/test_outputs.tf index f250fef192..e64c40e477 100644 --- a/examples/simple_regional_beta/test_outputs.tf +++ b/examples/simple_regional_beta/test_outputs.tf @@ -21,10 +21,6 @@ output "project_id" { value = var.project_id } -output "credentials_path" { - value = var.credentials_path -} - output "region" { value = module.gke.region } diff --git a/examples/simple_regional_beta/variables.tf b/examples/simple_regional_beta/variables.tf index 1da408a790..ed16642774 100644 --- a/examples/simple_regional_beta/variables.tf +++ b/examples/simple_regional_beta/variables.tf @@ -18,10 +18,6 @@ variable "project_id" { description = "The project ID to host the cluster in" } -variable "credentials_path" { - description = "The path to the GCP credentials JSON file" -} - variable "cluster_name_suffix" { description = "A suffix to append to the default cluster name" default = "" @@ -60,3 +56,32 @@ variable "cloudrun" { description = "Boolean to enable / disable CloudRun" default = true } + +variable "node_metadata" { + description = "Specifies how node metadata is exposed to the workload running on the node" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false +} + +variable "remove_default_node_pool" { + type = bool + description = "Remove default node pool while setting up the cluster" + default = false +} + +variable "node_pools" { + type = list(map(string)) + description = "List of maps containing node pools" + + default = [ + { + name = "default-node-pool" + }, + ] +} diff --git a/examples/simple_regional_private/README.md b/examples/simple_regional_private/README.md index 8175482731..917c097951 100644 --- a/examples/simple_regional_private/README.md +++ b/examples/simple_regional_private/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple private cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_regional_private/outputs.tf b/examples/simple_regional_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional_private/outputs.tf +++ b/examples/simple_regional_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/simple_regional_private_beta/main.tf b/examples/simple_regional_private_beta/main.tf index 0ca1873d86..db6c8a8204 100644 --- a/examples/simple_regional_private_beta/main.tf +++ b/examples/simple_regional_private_beta/main.tf @@ -19,7 +19,7 @@ locals { } provider "google-beta" { - version = "~> 2.12.0" + version = "~> 2.18.0" credentials = file(var.credentials_path) region = var.region } @@ -62,4 +62,3 @@ module "gke" { data "google_client_config" "default" { } - diff --git a/examples/simple_regional_private_beta/outputs.tf b/examples/simple_regional_private_beta/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_regional_private_beta/outputs.tf +++ b/examples/simple_regional_private_beta/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_regional_with_networking/README.md b/examples/simple_regional_with_networking/README.md new file mode 100644 index 0000000000..8ef0dad0ee --- /dev/null +++ b/examples/simple_regional_with_networking/README.md @@ -0,0 +1,46 @@ +# Simple Regional Cluster with Networking + +This example illustrates how to create a VPC and a simple cluster. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| cluster\_name | The name for the GKE cluster | string | `"gke-on-vpc-cluster"` | no | +| ip\_range\_pods\_name | The secondary ip range to use for pods | string | `"ip-range-pods"` | no | +| ip\_range\_services\_name | The secondary ip range to use for services | string | `"ip-range-scv"` | no | +| network | The VPC network created to host the cluster in | string | `"gke-network"` | no | +| project\_id | The project ID to host the cluster in | string | n/a | yes | +| region | The region to host the cluster in | string | `"us-central1"` | no | +| subnetwork | The subnetwork created to host the cluster in | string | `"gke-subnet"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | The cluster ca certificate (base64 encoded) | +| client\_token | The bearer token for auth | +| cluster\_name | Cluster name | +| ip\_range\_pods\_name | The secondary IP range used for pods | +| ip\_range\_services\_name | The secondary IP range used for services | +| kubernetes\_endpoint | The cluster endpoint | +| location | | +| master\_kubernetes\_version | The master Kubernetes version | +| network | | +| network\_name | The name of the VPC being created | +| project\_id | | +| region | | +| service\_account | The default service account used for running nodes. 
| +| subnet\_name | The name of the subnet being created | +| subnet\_secondary\_ranges | The secondary ranges associated with the subnet | +| subnetwork | | +| zones | List of zones in which the cluster resides | + + + +To provision this example, run the following from within this directory: +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure diff --git a/examples/simple_regional_with_networking/main.tf b/examples/simple_regional_with_networking/main.tf new file mode 100644 index 0000000000..7b39615114 --- /dev/null +++ b/examples/simple_regional_with_networking/main.tf @@ -0,0 +1,59 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "gcp-network" { + source = "terraform-google-modules/network/google" + version = "~> 1.4.0" + project_id = var.project_id + network_name = var.network + + subnets = [ + { + subnet_name = var.subnetwork + subnet_ip = "10.0.0.0/17" + subnet_region = var.region + }, + ] + + secondary_ranges = { + "${var.subnetwork}" = [ + { + range_name = var.ip_range_pods_name + ip_cidr_range = "192.168.0.0/18" + }, + { + range_name = var.ip_range_services_name + ip_cidr_range = "192.168.64.0/18" + }, + ] + } +} + +module "gke" { + source = "../../" + project_id = var.project_id + name = var.cluster_name + regional = true + region = var.region + network = module.gcp-network.network_name + subnetwork = module.gcp-network.subnets_names[0] + ip_range_pods = var.ip_range_pods_name + ip_range_services = var.ip_range_services_name + create_service_account = true +} + +data "google_client_config" "default" { +} diff --git a/examples/simple_regional_with_networking/outputs.tf b/examples/simple_regional_with_networking/outputs.tf new file mode 100644 index 0000000000..bb255b54a2 --- /dev/null +++ b/examples/simple_regional_with_networking/outputs.tf @@ -0,0 +1,60 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +output "kubernetes_endpoint" { + description = "The cluster endpoint" + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + description = "The bearer token for auth" + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + description = "The cluster ca certificate (base64 encoded)" + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The default service account used for running nodes." + value = module.gke.service_account +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network_name" { + description = "The name of the VPC being created" + value = module.gcp-network.network_name +} + +output "subnet_name" { + description = "The name of the subnet being created" + value = module.gcp-network.subnets_names +} + +output "subnet_secondary_ranges" { + description = "The secondary ranges associated with the subnet" + value = module.gcp-network.subnets_secondary_ranges +} + + + diff --git a/examples/simple_regional_with_networking/test_outputs.tf b/examples/simple_regional_with_networking/test_outputs.tf new file mode 100644 index 0000000000..a703679105 --- /dev/null +++ b/examples/simple_regional_with_networking/test_outputs.tf @@ -0,0 +1,58 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods_name" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods_name +} + +output "ip_range_services_name" { + description = "The secondary IP range used for services" + value = var.ip_range_services_name +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/simple_regional_with_networking/variables.tf b/examples/simple_regional_with_networking/variables.tf new file mode 100644 index 0000000000..8e9c0688de --- /dev/null +++ b/examples/simple_regional_with_networking/variables.tf @@ -0,0 +1,50 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name" { + description = "The name for the GKE cluster" + default = "gke-on-vpc-cluster" +} + +variable "region" { + description = "The region to host the cluster in" + default = "us-central1" +} + +variable "network" { + description = "The VPC network created to host the cluster in" + default = "gke-network" +} + +variable "subnetwork" { + description = "The subnetwork created to host the cluster in" + default = "gke-subnet" +} + +variable "ip_range_pods_name" { + description = "The secondary ip range to use for pods" + default = "ip-range-pods" +} + +variable "ip_range_services_name" { + description = "The secondary ip range to use for pods" + default = "ip-range-scv" +} + diff --git a/examples/simple_zonal/README.md b/examples/simple_zonal/README.md index 691f95c719..c086ea3a4b 100644 --- a/examples/simple_zonal/README.md +++ b/examples/simple_zonal/README.md @@ -31,7 +31,7 @@ This example illustrates how to create a simple cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_zonal/outputs.tf b/examples/simple_zonal/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_zonal/outputs.tf +++ b/examples/simple_zonal/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/simple_zonal_private/README.md b/examples/simple_zonal_private/README.md index e576800d72..83cb7c575b 100644 --- a/examples/simple_zonal_private/README.md +++ b/examples/simple_zonal_private/README.md @@ -32,7 +32,7 @@ This example illustrates how to create a simple private cluster. | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/simple_zonal_private/outputs.tf b/examples/simple_zonal_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/simple_zonal_private/outputs.tf +++ b/examples/simple_zonal_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account } diff --git a/examples/stub_domains/README.md b/examples/stub_domains/README.md index 126a1cd54c..bc4491b880 100644 --- a/examples/stub_domains/README.md +++ b/examples/stub_domains/README.md @@ -36,7 +36,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/stub_domains/outputs.tf b/examples/stub_domains/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains/outputs.tf +++ b/examples/stub_domains/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/stub_domains_private/README.md b/examples/stub_domains_private/README.md index ee4b89fa7f..205d5fdf76 100644 --- a/examples/stub_domains_private/README.md +++ b/examples/stub_domains_private/README.md @@ -38,7 +38,7 @@ It will: | network | | | project\_id | | | region | | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| service\_account | The default service account used for running nodes. | | subnetwork | | | zones | List of zones in which the cluster resides | diff --git a/examples/stub_domains_private/outputs.tf b/examples/stub_domains_private/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains_private/outputs.tf +++ b/examples/stub_domains_private/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/stub_domains_upstream_nameservers/outputs.tf b/examples/stub_domains_upstream_nameservers/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/stub_domains_upstream_nameservers/outputs.tf +++ b/examples/stub_domains_upstream_nameservers/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." value = module.gke.service_account } diff --git a/examples/upstream_nameservers/outputs.tf b/examples/upstream_nameservers/outputs.tf index 0d972dcd88..01a13147c2 100644 --- a/examples/upstream_nameservers/outputs.tf +++ b/examples/upstream_nameservers/outputs.tf @@ -29,7 +29,7 @@ output "ca_certificate" { } output "service_account" { - description = "The service account to default running nodes as if not overridden in `node_pools`." + description = "The default service account used for running nodes." 
value = module.gke.service_account
}
diff --git a/examples/workload_metadata_config/main.tf b/examples/workload_metadata_config/main.tf
index 11cae808d4..3d2254c2da 100644
--- a/examples/workload_metadata_config/main.tf
+++ b/examples/workload_metadata_config/main.tf
@@ -19,7 +19,7 @@ locals {
}

provider "google-beta" {
-  version = "~> 2.12.0"
+  version = "~> 2.18.0"
  region = var.region
}
@@ -40,8 +40,9 @@ module "gke" {
  subnetwork = var.subnetwork
  ip_range_pods = var.ip_range_pods
  ip_range_services = var.ip_range_services
-  create_service_account = false
-  service_account = var.compute_engine_service_account
+  create_service_account = true
+  grant_registry_access = true
+  registry_project_id = var.registry_project_id
  enable_private_endpoint = true
  enable_private_nodes = true
  master_ipv4_cidr_block = "172.16.0.0/28"
diff --git a/examples/workload_metadata_config/outputs.tf b/examples/workload_metadata_config/outputs.tf
index 0d972dcd88..01a13147c2 100644
--- a/examples/workload_metadata_config/outputs.tf
+++ b/examples/workload_metadata_config/outputs.tf
@@ -29,7 +29,7 @@ output "ca_certificate" {
}

output "service_account" {
-  description = "The service account to default running nodes as if not overridden in `node_pools`."
+  description = "The default service account used for running nodes."
  value = module.gke.service_account
}
diff --git a/examples/workload_metadata_config/variables.tf b/examples/workload_metadata_config/variables.tf
index 040c78d2c4..eaa8c36e83 100644
--- a/examples/workload_metadata_config/variables.tf
+++ b/examples/workload_metadata_config/variables.tf
@@ -48,7 +48,6 @@ variable "ip_range_services" {
  description = "The secondary ip range to use for services"
}

-variable "compute_engine_service_account" {
-  description = "Service account to associate to the nodes in the cluster"
+variable "registry_project_id" {
+  description = "The project ID for the GCR registry"
}
-
diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py
index f6beb84832..b98b8bb69e 100755
--- a/helpers/generate_modules/generate_modules.py
+++ b/helpers/generate_modules/generate_modules.py
@@ -46,13 +46,27 @@ def template_options(self, base):
        'private_cluster': False,
    }),
    Module("./modules/private-cluster", {
+        'module_path': '//modules/private-cluster',
        'private_cluster': True
    }),
    Module("./modules/beta-private-cluster", {
+        'module_path': '//modules/beta-private-cluster',
        'private_cluster': True,
        'beta_cluster': True,
    }),
+    Module("./modules/private-cluster-update-variant", {
+        'module_path': '//modules/private-cluster-update-variant',
+        'private_cluster': True,
+        'update_variant': True,
+    }),
+    Module("./modules/beta-private-cluster-update-variant", {
+        'module_path': '//modules/beta-private-cluster-update-variant',
+        'private_cluster': True,
+        'update_variant': True,
+        'beta_cluster': True,
+    }),
    Module("./modules/beta-public-cluster", {
+        'module_path': '//modules/beta-public-cluster',
        'private_cluster': False,
        'beta_cluster': True,
    }),
diff --git a/helpers/migrate.py b/helpers/migrate.py
new file mode 100755
index 0000000000..8f2d71cfce
--- /dev/null
+++ b/helpers/migrate.py
@@ -0,0 +1,318 @@
+#!/usr/bin/env python3

+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import copy
+import subprocess
+import sys
+import re
+
+MIGRATIONS = [
+    {
+        "resource_type": "google_container_cluster",
+        "name": "zonal_primary",
+        "rename": "primary",
+        "module": ""
+    },
+    {
+        "resource_type": "google_container_node_pool",
+        "name": "zonal_pools",
+        "rename": "pools",
+        "module": ""
+    },
+    {
+        "resource_type": "null_resource",
+        "name": "wait_for_zonal_cluster",
+        "rename": "wait_for_cluster",
+        "module": "",
+        "plural": False
+    },
+]
+
+
+class ModuleMigration:
+    """
+    Migrate the cluster resources from their old zonal names to match the
+    new module structure.
+    """
+
+    def __init__(self, source_module):
+        self.source_module = source_module
+
+    def moves(self):
+        """
+        Generate the set of old/new resource pairs that will be migrated
+        to the `destination` module.
+        """
+        resources = self.targets()
+        moves = []
+        for (old, migration) in resources:
+            new = copy.deepcopy(old)
+            new.module += migration["module"]
+
+            # Update the copied resource with the "rename" value if it is set
+            if "rename" in migration:
+                new.name = migration["rename"]
+
+            old.plural = migration.get("plural", True)
+            new.plural = migration.get("plural", True)
+
+            pair = (old.path(), new.path())
+            moves.append(pair)
+        return moves
+
+    def targets(self):
+        """
+        A list of resources that will be moved to the new module
+        """
+        to_move = []
+
+        for migration in MIGRATIONS:
+            resource_type = migration["resource_type"]
+            resource_name = migration["name"]
+            matching_resources = self.source_module.get_resources(
+                resource_type,
+                resource_name)
+            to_move += [(r, migration) for r in matching_resources]
+
+        return to_move
+
+
+class TerraformModule:
+    """
+    A Terraform module with associated resources.
+    """
+
+    def __init__(self, name, resources):
+        """
+        Create a new module and associate it with a list of resources.
+        """
+        self.name = name
+        self.resources = resources
+
+    def get_resources(self, resource_type=None, resource_name=None):
+        """
+        Return a list of resources matching the given resource type and name.
+        """
+
+        ret = []
+        for resource in self.resources:
+            matches_type = (resource_type is None or
+                            resource_type == resource.resource_type)
+
+            name_pattern = re.compile(r'%s(\[\d+\])?' % resource_name)
+            matches_name = (resource_name is None or
+                            name_pattern.match(resource.name))
+
+            if matches_type and matches_name:
+                ret.append(resource)
+
+        return ret
+
+    def has_resource(self, resource_type=None, resource_name=None):
+        """
+        Does this module contain a resource with the matching type and name?
+        """
+        for resource in self.resources:
+            matches_type = (resource_type is None or
+                            resource_type == resource.resource_type)
+
+            matches_name = (resource_name is None or
+                            resource_name in resource.name)
+
+            if matches_type and matches_name:
+                return True
+
+        return False
+
+    def __repr__(self):
+        return "{}({!r}, {!r})".format(
+            self.__class__.__name__,
+            self.name,
+            [repr(resource) for resource in self.resources])
+
+
+class TerraformResource:
+    """
+    A Terraform resource, defined by the identifier of that resource.
+    """
+
+    @classmethod
+    def from_path(cls, path):
+        """
+        Generate a new Terraform resource, based on the fully qualified
+        Terraform resource path.
+        """
+        if re.match(r'\A[\w.\[\]-]+\Z', path) is None:
+            raise ValueError(
+                "Invalid Terraform resource path {!r}".format(path))
+
+        parts = path.split(".")
+        name = parts.pop()
+        resource_type = parts.pop()
+        module = ".".join(parts)
+        return cls(module, resource_type, name)
+
+    def __init__(self, module, resource_type, name):
+        """
+        Create a new TerraformResource from a pre-parsed path.
+        """
+        self.module = module
+        self.resource_type = resource_type
+
+        find_suffix = re.match(r'(^.+)\[(\d+)\]', name)
+        if find_suffix:
+            self.name = find_suffix.group(1)
+            self.index = find_suffix.group(2)
+        else:
+            self.name = name
+            self.index = -1
+
+    def path(self):
+        """
+        Return the fully qualified resource path.
+        """
+        parts = [self.module, self.resource_type, self.name]
+        if parts[0] == '':
+            del parts[0]
+        path = ".".join(parts)
+        if self.index != -1 and self.plural:
+            path = "{0}[{1}]".format(path, self.index)
+        return path
+
+    def __repr__(self):
+        return "{}({!r}, {!r}, {!r})".format(
+            self.__class__.__name__,
+            self.module,
+            self.resource_type,
+            self.name)
+
+
+def group_by_module(resources):
+    """
+    Group a set of resources according to their containing module.
+    """
+
+    groups = {}
+    for resource in resources:
+        if resource.module in groups:
+            groups[resource.module].append(resource)
+        else:
+            groups[resource.module] = [resource]
+
+    return [
+        TerraformModule(name, contained)
+        for name, contained in groups.items()
+    ]
+
+
+def read_state(statefile=None):
+    """
+    Read the terraform state at the given path.
+    """
+    argv = ["terraform", "state", "list"]
+    result = subprocess.run(argv,
+                            capture_output=True,
+                            check=True,
+                            encoding='utf-8')
+    elements = result.stdout.split("\n")
+    elements.pop()
+    return elements
+
+
+def state_changes_for_module(module, statefile=None):
+    """
+    Compute the Terraform state changes (deletions and moves) for a single
+    module.
+    """
+    commands = []
+
+    migration = ModuleMigration(module)
+
+    for (old, new) in migration.moves():
+        wrapper = '"{0}"'
+        argv = ["terraform",
+                "state",
+                "mv",
+                wrapper.format(old),
+                wrapper.format(new)]
+        commands.append(argv)
+
+    return commands
+
+
+def migrate(statefile=None, dryrun=False):
+    """
+    Migrate the terraform state in `statefile` to match the post-refactor
+    resource structure.
+    """
+
+    # Generate a list of Terraform resource states from the output of
+    # `terraform state list`
+    resources = [
+        TerraformResource.from_path(path)
+        for path in read_state(statefile)
+    ]
+
+    # Group resources based on the module where they're defined.
+    modules = group_by_module(resources)
+
+    # Filter our list of Terraform modules down to anything that looks like a
+    # zonal GKE module. We key this off of the presence of
+    # `google_container_cluster.zonal_primary` since that should almost always
+    # be unique to a GKE module.
+ modules_to_migrate = [ + module for module in modules + if module.has_resource("google_container_cluster", "zonal_primary") + ] + + print("---- Migrating the following modules:") + for module in modules_to_migrate: + print("-- " + module.name) + + # Collect a list of resources for each module + commands = [] + for module in modules_to_migrate: + commands += state_changes_for_module(module, statefile) + + print("---- Commands to run:") + for argv in commands: + if dryrun: + print(" ".join(argv)) + else: + argv = [arg.strip('"') for arg in argv] + subprocess.run(argv, check=True, encoding='utf-8') + + +def main(argv): + parser = argparser() + args = parser.parse_args(argv[1:]) + + # print("cp {} {}".format(args.oldstate, args.newstate)) + # shutil.copy(args.oldstate, args.newstate) + + migrate(dryrun=args.dryrun) + + +def argparser(): + parser = argparse.ArgumentParser(description='Migrate Terraform state') + parser.add_argument('--dryrun', action='store_true', + help='Print the `terraform state mv` commands instead ' + 'of running the commands.') + return parser + + +if __name__ == "__main__": + main(sys.argv) diff --git a/helpers/terraform_docs b/helpers/terraform_docs deleted file mode 100755 index c33230959b..0000000000 --- a/helpers/terraform_docs +++ /dev/null @@ -1,694 +0,0 @@ -#!/usr/bin/env bash - -set -e - -main() { - declare argv - argv=$(getopt -o a: --long args: -- "$@") || return - eval "set -- $argv" - - declare args - declare files - - for argv; do - case $argv in - (-a|--args) - shift - args="$1" - shift - ;; - (--) - shift - files="$@" - break - ;; - esac - done - - local hack_terraform_docs=$(terraform version | head -1 | grep -c 0.12) - - if [[ "$hack_terraform_docs" == "1" ]]; then - which awk 2>&1 >/dev/null || ( echo "awk is required for terraform-docs hack to work with Terraform 0.12"; exit 1) - - tmp_file_awk=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") - terraform_docs_awk "$tmp_file_awk" - terraform_docs "$tmp_file_awk" "$args" "$files" - rm -f "$tmp_file_awk" - else - terraform_docs "0" "$args" "$files" - fi - -} - -terraform_docs() { - readonly terraform_docs_awk_file="$1" - readonly args="$2" - readonly files="$3" - - declare -a paths - declare -a tfvars_files - - index=0 - - for file_with_path in $files; do - file_with_path="${file_with_path// /__REPLACED__SPACE__}" - - paths[index]=$(dirname "$file_with_path") - - if [[ "$file_with_path" == *".tfvars" ]]; then - tfvars_files+=("$file_with_path") - fi - - ((index+=1)) - done - - readonly tmp_file=$(mktemp) - readonly text_file="README.md" - - for path_uniq in $(echo "${paths[*]}" | tr ' ' '\n' | sort -u); do - path_uniq="${path_uniq//__REPLACED__SPACE__/ }" - - pushd "$path_uniq" > /dev/null - - if [[ ! 
-f "$text_file" ]]; then - popd > /dev/null - continue - fi - - if [[ "$terraform_docs_awk_file" == "0" ]]; then - terraform-docs $args md ./ > "$tmp_file" - else - # Can't append extension for mktemp, so renaming instead - tmp_file_docs=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") - mv "$tmp_file_docs" "$tmp_file_docs.tf" - tmp_file_docs_tf="$tmp_file_docs.tf" - - awk -f "$terraform_docs_awk_file" ./*.tf > "$tmp_file_docs_tf" - terraform-docs $args md "$tmp_file_docs_tf" > "$tmp_file" - rm -f "$tmp_file_docs_tf" - fi - - # Replace content between markers with the placeholder - https://stackoverflow.com/questions/1212799/how-do-i-extract-lines-between-two-line-delimiters-in-perl#1212834 - perl -i -ne 'if (/BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/../END OF PRE-COMMIT-TERRAFORM DOCS HOOK/) { print $_ if /BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/; print "I_WANT_TO_BE_REPLACED\n$_" if /END OF PRE-COMMIT-TERRAFORM DOCS HOOK/;} else { print $_ }' "$text_file" - - # Replace placeholder with the content of the file - perl -i -e 'open(F, "'"$tmp_file"'"); $f = join "", ; while(<>){if (/I_WANT_TO_BE_REPLACED/) {print $f} else {print $_};}' "$text_file" - - rm -f "$tmp_file" - - popd > /dev/null - done -} - -terraform_docs_awk() { - readonly output_file=$1 - - cat <<"EOF" > $output_file -# This script converts Terraform 0.12 variables/outputs to something suitable for `terraform-docs` -# As of terraform-docs v0.6.0, HCL2 is not supported. This script is a *dirty hack* to get around it. -# https://github.com/segmentio/terraform-docs/ -# https://github.com/segmentio/terraform-docs/issues/62 - -# Script was originally found here: https://github.com/cloudposse/build-harness/blob/master/bin/terraform-docs.awk - -{ - if ( $0 ~ /\{/ ) { - braceCnt++ - } - - if ( $0 ~ /\}/ ) { - braceCnt-- - } - - # [START] variable or output block started - if ($0 ~ /^[[:space:]]*(variable|output)[[:space:]][[:space:]]*"(.*?)"/) { - # Normalize the braceCnt (should be 1 now) - braceCnt = 1 - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - blockCnt++ - print $0 - } - - # [START] multiline default statement started - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*(default)[[:space:]][[:space:]]*=/) { - if ($3 ~ "null") { - print " default = \"null\"" - } else { - print $0 - blockDefCnt++ - blockDefStart=1 - } - } - } - - # [PRINT] single line "description" - if (blockCnt > 0) { - if (blockDefCnt == 0) { - if ($0 ~ /^[[:space:]][[:space:]]*description[[:space:]][[:space:]]*=/) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - print $0 - } - } - } - - # [PRINT] single line "type" - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*type[[:space:]][[:space:]]*=/ ) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - type=$3 - if (type ~ "object") { - print " type = \"object\"" - } else { - # legacy quoted types: "string", "list", and "map" - if ($3 ~ /^[[:space:]]*"(.*?)"[[:space:]]*$/) { - print " type = " $3 - } else { - print " type = \"" $3 "\"" - } - } - } - } - - # [CLOSE] variable/output block - if (blockCnt > 0) { - if (braceCnt == 0 && blockCnt > 0) { - blockCnt-- - print $0 - } - } - - # [PRINT] Multiline "default" statement - if (blockCnt > 0 && blockDefCnt > 0) { - if (blockDefStart == 1) { - blockDefStart = 0 - } else { - print $0 - } - } -} -EOF - -} - -getopt() { - # pure-getopt, a drop-in replacement for GNU getopt in pure Bash. 
- # version 1.4.3 - # - # Copyright 2012-2018 Aron Griffis - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be included - # in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - _getopt_main() { - # Returns one of the following statuses: - # 0 success - # 1 error parsing parameters - # 2 error in getopt invocation - # 3 internal error - # 4 reserved for -T - # - # For statuses 0 and 1, generates normalized and shell-quoted - # "options -- parameters" on stdout. - - declare parsed status - declare short long name flags - declare have_short=false - - # Synopsis from getopt man-page: - # - # getopt optstring parameters - # getopt [options] [--] optstring parameters - # getopt [options] -o|--options optstring [options] [--] parameters - # - # The first form can be normalized to the third form which - # _getopt_parse() understands. The second form can be recognized after - # first parse when $short hasn't been set. - - if [[ -n ${GETOPT_COMPATIBLE+isset} || $1 == [^-]* ]]; then - # Enable compatibility mode - flags=c$flags - # Normalize first to third synopsis form - set -- -o "$1" -- "${@:2}" - fi - - # First parse always uses flags=p since getopt always parses its own - # arguments effectively in this mode. - parsed=$(_getopt_parse getopt ahl:n:o:qQs:TuV \ - alternative,help,longoptions:,name:,options:,quiet,quiet-output,shell:,test,version \ - p "$@") - status=$? - if [[ $status != 0 ]]; then - if [[ $status == 1 ]]; then - echo "Try \`getopt --help' for more information." >&2 - # Since this is the first parse, convert status 1 to 2 - status=2 - fi - return $status - fi - eval "set -- $parsed" - - while [[ $# -gt 0 ]]; do - case $1 in - (-a|--alternative) - flags=a$flags ;; - - (-h|--help) - _getopt_help - return 2 # as does GNU getopt - ;; - - (-l|--longoptions) - long="$long${long:+,}$2" - shift ;; - - (-n|--name) - name=$2 - shift ;; - - (-o|--options) - short=$2 - have_short=true - shift ;; - - (-q|--quiet) - flags=q$flags ;; - - (-Q|--quiet-output) - flags=Q$flags ;; - - (-s|--shell) - case $2 in - (sh|bash) - flags=${flags//t/} ;; - (csh|tcsh) - flags=t$flags ;; - (*) - echo 'getopt: unknown shell after -s or --shell argument' >&2 - echo "Try \`getopt --help' for more information." >&2 - return 2 ;; - esac - shift ;; - - (-u|--unquoted) - flags=u$flags ;; - - (-T|--test) - return 4 ;; - - (-V|--version) - echo "pure-getopt 1.4.3" - return 0 ;; - - (--) - shift - break ;; - esac - - shift - done - - if ! $have_short; then - # $short was declared but never set, not even to an empty string. 
- # This implies the second form in the synopsis. - if [[ $# == 0 ]]; then - echo 'getopt: missing optstring argument' >&2 - echo "Try \`getopt --help' for more information." >&2 - return 2 - fi - short=$1 - have_short=true - shift - fi - - if [[ $short == -* ]]; then - # Leading dash means generate output in place rather than reordering, - # unless we're already in compatibility mode. - [[ $flags == *c* ]] || flags=i$flags - short=${short#?} - elif [[ $short == +* ]]; then - # Leading plus means POSIXLY_CORRECT, unless we're already in - # compatibility mode. - [[ $flags == *c* ]] || flags=p$flags - short=${short#?} - fi - - # This should fire if POSIXLY_CORRECT is in the environment, even if - # it's an empty string. That's the difference between :+ and + - flags=${POSIXLY_CORRECT+p}$flags - - _getopt_parse "${name:-getopt}" "$short" "$long" "$flags" "$@" - } - - _getopt_parse() { - # Inner getopt parser, used for both first parse and second parse. - # Returns 0 for success, 1 for error parsing, 3 for internal error. - # In the case of status 1, still generates stdout with whatever could - # be parsed. - # - # $flags is a string of characters with the following meanings: - # a - alternative parsing mode - # c - GETOPT_COMPATIBLE - # i - generate output in place rather than reordering - # p - POSIXLY_CORRECT - # q - disable error reporting - # Q - disable normal output - # t - quote for csh/tcsh - # u - unquoted output - - declare name="$1" short="$2" long="$3" flags="$4" - shift 4 - - # Split $long on commas, prepend double-dashes, strip colons; - # for use with _getopt_resolve_abbrev - declare -a longarr - _getopt_split longarr "$long" - longarr=( "${longarr[@]/#/--}" ) - longarr=( "${longarr[@]%:}" ) - longarr=( "${longarr[@]%:}" ) - - # Parse and collect options and parameters - declare -a opts params - declare o alt_recycled=false error=0 - - while [[ $# -gt 0 ]]; do - case $1 in - (--) - params=( "${params[@]}" "${@:2}" ) - break ;; - - (--*=*) - o=${1%%=*} - if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then - error=1 - elif [[ ,"$long", == *,"${o#--}"::,* ]]; then - opts=( "${opts[@]}" "$o" "${1#*=}" ) - elif [[ ,"$long", == *,"${o#--}":,* ]]; then - opts=( "${opts[@]}" "$o" "${1#*=}" ) - elif [[ ,"$long", == *,"${o#--}",* ]]; then - if $alt_recycled; then o=${o#-}; fi - _getopt_err "$name: option '$o' doesn't allow an argument" - error=1 - else - echo "getopt: assertion failed (1)" >&2 - return 3 - fi - alt_recycled=false - ;; - - (--?*) - o=$1 - if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then - error=1 - elif [[ ,"$long", == *,"${o#--}",* ]]; then - opts=( "${opts[@]}" "$o" ) - elif [[ ,"$long", == *,"${o#--}::",* ]]; then - opts=( "${opts[@]}" "$o" '' ) - elif [[ ,"$long", == *,"${o#--}:",* ]]; then - if [[ $# -ge 2 ]]; then - shift - opts=( "${opts[@]}" "$o" "$1" ) - else - if $alt_recycled; then o=${o#-}; fi - _getopt_err "$name: option '$o' requires an argument" - error=1 - fi - else - echo "getopt: assertion failed (2)" >&2 - return 3 - fi - alt_recycled=false - ;; - - (-*) - if [[ $flags == *a* ]]; then - # Alternative parsing mode! - # Try to handle as a long option if any of the following apply: - # 1. There's an equals sign in the mix -x=3 or -xy=3 - # 2. There's 2+ letters and an abbreviated long match -xy - # 3. There's a single letter and an exact long match - # 4. There's a single letter and no short match - o=${1::2} # temp for testing #4 - if [[ $1 == *=* || $1 == -?? 
|| \ - ,$long, == *,"${1#-}"[:,]* || \ - ,$short, != *,"${o#-}"[:,]* ]]; then - o=$(_getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" 2>/dev/null) - case $? in - (0) - # Unambiguous match. Let the long options parser handle - # it, with a flag to get the right error message. - set -- "-$1" "${@:2}" - alt_recycled=true - continue ;; - (1) - # Ambiguous match, generate error and continue. - _getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" >/dev/null - error=1 - shift - continue ;; - (2) - # No match, fall through to single-character check. - true ;; - (*) - echo "getopt: assertion failed (3)" >&2 - return 3 ;; - esac - fi - fi - - o=${1::2} - if [[ "$short" == *"${o#-}"::* ]]; then - if [[ ${#1} -gt 2 ]]; then - opts=( "${opts[@]}" "$o" "${1:2}" ) - else - opts=( "${opts[@]}" "$o" '' ) - fi - elif [[ "$short" == *"${o#-}":* ]]; then - if [[ ${#1} -gt 2 ]]; then - opts=( "${opts[@]}" "$o" "${1:2}" ) - elif [[ $# -ge 2 ]]; then - shift - opts=( "${opts[@]}" "$o" "$1" ) - else - _getopt_err "$name: option requires an argument -- '${o#-}'" - error=1 - fi - elif [[ "$short" == *"${o#-}"* ]]; then - opts=( "${opts[@]}" "$o" ) - if [[ ${#1} -gt 2 ]]; then - set -- "$o" "-${1:2}" "${@:2}" - fi - else - if [[ $flags == *a* ]]; then - # Alternative parsing mode! Report on the entire failed - # option. GNU includes =value but we omit it for sanity with - # very long values. - _getopt_err "$name: unrecognized option '${1%%=*}'" - else - _getopt_err "$name: invalid option -- '${o#-}'" - if [[ ${#1} -gt 2 ]]; then - set -- "$o" "-${1:2}" "${@:2}" - fi - fi - error=1 - fi ;; - - (*) - # GNU getopt in-place mode (leading dash on short options) - # overrides POSIXLY_CORRECT - if [[ $flags == *i* ]]; then - opts=( "${opts[@]}" "$1" ) - elif [[ $flags == *p* ]]; then - params=( "${params[@]}" "$@" ) - break - else - params=( "${params[@]}" "$1" ) - fi - esac - - shift - done - - if [[ $flags == *Q* ]]; then - true # generate no output - else - echo -n ' ' - if [[ $flags == *[cu]* ]]; then - printf '%s -- %s' "${opts[*]}" "${params[*]}" - else - if [[ $flags == *t* ]]; then - _getopt_quote_csh "${opts[@]}" -- "${params[@]}" - else - _getopt_quote "${opts[@]}" -- "${params[@]}" - fi - fi - echo - fi - - return $error - } - - _getopt_err() { - if [[ $flags != *q* ]]; then - printf '%s\n' "$1" >&2 - fi - } - - _getopt_resolve_abbrev() { - # Resolves an abbrevation from a list of possibilities. - # If the abbreviation is unambiguous, echoes the expansion on stdout - # and returns 0. If the abbreviation is ambiguous, prints a message on - # stderr and returns 1. (For first parse this should convert to exit - # status 2.) If there is no match at all, prints a message on stderr - # and returns 2. - declare a q="$1" - declare -a matches - shift - for a; do - if [[ $q == "$a" ]]; then - # Exact match. Squash any other partial matches. - matches=( "$a" ) - break - elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q" ]]; then - # Exact alternative match. Squash any other partial matches. - matches=( "$a" ) - break - elif [[ $a == "$q"* ]]; then - # Abbreviated match. - matches=( "${matches[@]}" "$a" ) - elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q"* ]]; then - # Abbreviated alternative match. 
- matches=( "${matches[@]}" "${a#-}" ) - fi - done - case ${#matches[@]} in - (0) - [[ $flags == *q* ]] || \ - printf "$name: unrecognized option %s\\n" >&2 \ - "$(_getopt_quote "$q")" - return 2 ;; - (1) - printf '%s' "${matches[0]}"; return 0 ;; - (*) - [[ $flags == *q* ]] || \ - printf "$name: option %s is ambiguous; possibilities: %s\\n" >&2 \ - "$(_getopt_quote "$q")" "$(_getopt_quote "${matches[@]}")" - return 1 ;; - esac - } - - _getopt_split() { - # Splits $2 at commas to build array specified by $1 - declare IFS=, - eval "$1=( \$2 )" - } - - _getopt_quote() { - # Quotes arguments with single quotes, escaping inner single quotes - declare s space q=\' - for s; do - printf "$space'%s'" "${s//$q/$q\\$q$q}" - space=' ' - done - } - - _getopt_quote_csh() { - # Quotes arguments with single quotes, escaping inner single quotes, - # bangs, backslashes and newlines - declare s i c space - for s; do - echo -n "$space'" - for ((i=0; i<${#s}; i++)); do - c=${s:i:1} - case $c in - (\\|\'|!) - echo -n "'\\$c'" ;; - ($'\n') - echo -n "\\$c" ;; - (*) - echo -n "$c" ;; - esac - done - echo -n \' - space=' ' - done - } - - _getopt_help() { - cat <<-EOT >&2 - - Usage: - getopt - getopt [options] [--] - getopt [options] -o|--options [options] [--] - - Parse command options. - - Options: - -a, --alternative allow long options starting with single - - -l, --longoptions the long options to be recognized - -n, --name the name under which errors are reported - -o, --options the short options to be recognized - -q, --quiet disable error reporting by getopt(3) - -Q, --quiet-output no normal output - -s, --shell set quoting conventions to those of - -T, --test test for getopt(1) version - -u, --unquoted do not quote the output - - -h, --help display this help and exit - -V, --version output version information and exit - - For more details see getopt(1). - EOT - } - - _getopt_version_check() { - if [[ -z $BASH_VERSION ]]; then - echo "getopt: unknown version of bash might not be compatible" >&2 - return 1 - fi - - # This is a lexical comparison that should be sufficient forever. - if [[ $BASH_VERSION < 2.05b ]]; then - echo "getopt: bash $BASH_VERSION might not be compatible" >&2 - return 1 - fi - - return 0 - } - - _getopt_version_check - _getopt_main "$@" - declare status=$? - unset -f _getopt_main _getopt_err _getopt_parse _getopt_quote \ - _getopt_quote_csh _getopt_resolve_abbrev _getopt_split _getopt_help \ - _getopt_version_check - return $status -} - -[[ $BASH_SOURCE != "$0" ]] || main "$@" \ No newline at end of file diff --git a/helpers/terraform_validate b/helpers/terraform_validate deleted file mode 100755 index 0c284194ac..0000000000 --- a/helpers/terraform_validate +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -# -# Copyright 2019 Google LLC. This software is provided as-is, without warranty -# or representation for any use or purpose. Your use of it is subject to your -# agreement with Google. -# -# This script initializes modules so that terraform validate as of 0.12 behaves -# as expected and does not issue errors such as: -# -# Error: Module not installed -# -# on test/fixtures/shared_vpc_no_subnets/main.tf line 37: -# 37: module "project-factory" { -# -# This module is not yet installed. Run "terraform init" to install all modules -# required by this configuration. - -# The first and only argument to this script is the directory containing *.tf -# files to validate. This directory is assumed to be a root module. 
-
-cd "$1"
-terraform init -backend=false
-terraform validate
\ No newline at end of file
diff --git a/main.tf b/main.tf
index b63d60f884..1090227fd8 100644
--- a/main.tf
+++ b/main.tf
@@ -23,7 +23,7 @@ data "google_compute_zones" "available" {
  provider = google

  project = var.project_id
-  region = var.region
+  region = local.region
}

resource "random_shuffle" "available_zones" {
@@ -34,6 +34,7 @@ locals {
  // location
  location = var.regional ? var.region : var.zones[0]
+  region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region

  // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted
  node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones))

  // kubernetes version
@@ -44,6 +45,7 @@ locals {
  master_version = var.regional ? local.master_version_regional : local.master_version_zonal
  node_version = var.regional ? local.node_version_regional : local.node_version_zonal

+  custom_kube_dns_config = length(keys(var.stub_domains)) > 0
  upstream_nameservers_config = length(var.upstream_nameservers) > 0
  network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id
diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md
new file mode 100644
index 0000000000..7a3be69fc4
--- /dev/null
+++ b/modules/beta-private-cluster-update-variant/README.md
@@ -0,0 +1,293 @@
+# Terraform Kubernetes Engine Module
+
+This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. This particular submodule creates a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters). Beta features are enabled in this submodule.
+The resources/services/activations/deletions that this module will create/trigger are:
+- Create a GKE cluster with the provided addons
+- Create GKE Node Pool(s) with provided configuration and attach to cluster
+- Replace the default kube-dns configmap if `stub_domains` are provided
+- Activate network policy if `network_policy` is true
+- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true
+
+Sub modules are provided for creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules.
+
+**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master.
+
+
+## Compatibility
+
+This module is meant for use with Terraform 0.12. If you haven't
+[upgraded][terraform-0.12-upgrade] and need a Terraform
+0.11.x-compatible version of this module, the last released version
+intended for Terraform 0.11.x is [3.0.0].
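A note on the root `main.tf` hunk earlier in this diff: when `var.region` is null, the new `local.region` is derived from the first configured zone. A minimal sketch (the zone value is hypothetical, purely for illustration) of how that expression evaluates:

```hcl
locals {
  zones = ["us-central1-a"] # hypothetical input

  # split("-", "us-central1-a") => ["us", "central1", "a"]
  # slice(..., 0, 2)            => ["us", "central1"]
  # join("-", ...)              => "us-central1"
  region = join("-", slice(split("-", local.zones[0]), 0, 2))
}
```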
+ +## Usage +There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: + +```hcl +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster-update-variant" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.0.0.0/28" + istio = true + cloudrun = true + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + preemptible = false + initial_node_count = 80 + }, + ] + + node_pools_oauth_scopes = { + all = [] + + default-node-pool = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure + +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. 
Group name must be in format gke-security-groups@yourdomain.com | string | `"null"` | no | +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| default\_max\_pods\_per\_node | The maximum number of pods to schedule per node | string | `"110"` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_intranode\_visibility | Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network | bool | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| enable\_vertical\_pod\_autoscaling | Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it | bool | `"false"` | no | +| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| identity\_namespace | Workload Identity namespace | string | `""` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. 
| string | `"60s"` | no |
+| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes |
+| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes |
+| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no |
+| istio | (Beta) Enable Istio addon | string | `"false"` | no |
+| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no |
+| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull the latest available version in the selected region. | string | `"latest"` | no |
+| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no |
+| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no |
+| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no |
+| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no |
+| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta), and none. | string | `"monitoring.googleapis.com"` | no |
+| name | The name of the cluster (required) | string | n/a | yes |
+| network | The VPC network to host the cluster in (required) | string | n/a | yes |
+| network\_policy | Enable network policy addon | bool | `"false"` | no |
+| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no |
+| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no |
+| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no |
+| node\_pools | List of maps containing node pools | list(map(string)) | `` | no |
+| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no |
+| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no |
+| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no |
+| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no |
+| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no |
+| node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set to the same version as the master at cluster creation. | string | `""` | no |
+| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no |
+| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no |
+| project\_id | The project ID to host the cluster in (required) | string | n/a | yes |
+| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no |
+| regional | Whether this is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no |
+| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, the storage.objectViewer role is assigned on this project. | string | `""` | no |
+| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"null"` | no |
+| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no |
+| resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no |
+| sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no |
+| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no |
+| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks the functionality of the `stub_domains` and `upstream_nameservers` variables. | bool | `"false"` | no |
+| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no |
+| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes |
+| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list(string) | `` | no |
+| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| ca\_certificate | Cluster ca certificate (base64 encoded) |
+| cloudrun\_enabled | Whether CloudRun is enabled |
+| endpoint | Cluster endpoint |
+| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling is enabled |
+| http\_load\_balancing\_enabled | Whether HTTP load balancing is enabled |
+| intranode\_visibility\_enabled | Whether intra-node visibility is enabled |
+| istio\_enabled | Whether Istio is enabled |
+| kubernetes\_dashboard\_enabled | Whether the kubernetes dashboard is enabled |
+| location | Cluster location (region if regional cluster, zone if zonal cluster) |
+| logging\_service | Logging service used |
+| master\_authorized\_networks\_config | Networks from which access to master is permitted |
+| master\_version | Current master kubernetes version |
+| min\_master\_version | Minimum master kubernetes version |
+| monitoring\_service | Monitoring service used |
+| name | Cluster name |
+| network\_policy\_enabled | Whether network policy is enabled |
+| node\_pools\_names | List of node pool names |
+| node\_pools\_versions | List of node pool versions |
+| pod\_security\_policy\_enabled | Whether pod security policy is enabled |
+| region | Cluster region |
+| release\_channel | The release channel of this cluster |
+| service\_account | The service account used by default to run nodes, if not overridden in `node_pools`. |
+| type | Cluster type (regional / zonal) |
+| vertical\_pod\_autoscaling\_enabled | Whether vertical pod autoscaling is enabled |
+| zones | List of zones in which the cluster resides |
+
+
+
+## Requirements
+
+Before this module can be used on a project, you must ensure that the following prerequisites are fulfilled:
+
+1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed.
+2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account).
+3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in.
+4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there.
+
+The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections.
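+
+If you manage projects directly rather than through the project factory, the APIs from step 3 can also be enabled with Terraform itself. The snippet below is a minimal, illustrative sketch and not part of this module; the project ID is a placeholder:
+
+```hcl
+# Illustrative only: enable the APIs this module depends on.
+resource "google_project_service" "compute" {
+  project = "my-project-id" # placeholder project ID
+  service = "compute.googleapis.com"
+}
+
+resource "google_project_service" "container" {
+  project = "my-project-id" # placeholder project ID
+  service = "container.googleapis.com"
+}
+```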
+ +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. + + +[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md +[terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-private-cluster-update-variant/auth.tf b/modules/beta-private-cluster-update-variant/auth.tf new file mode 100644 index 0000000000..c177eee5a7 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/auth.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" { + provider = google-beta +} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) +} diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf new file mode 100644 index 0000000000..366280d7b8 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -0,0 +1,419 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create Container Cluster + *****************************************/ +resource "google_container_cluster" "primary" { + provider = google-beta + + name = var.name + description = var.description + project = var.project_id + resource_labels = var.cluster_resource_labels + + location = local.location + node_locations = local.node_locations + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.master_version + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + enable_intranode_visibility = var.enable_intranode_visibility + default_max_pods_per_node = var.default_max_pods_per_node + + vertical_pod_autoscaling { + enabled = var.enable_vertical_pod_autoscaling + } + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "resource_usage_export_config" { + for_each = var.resource_usage_export_dataset_id != "" ? 
[var.resource_usage_export_dataset_id] : [] + content { + enable_network_egress_metering = true + bigquery_destination { + dataset_id = resource_usage_export_config.value + } + } + } + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } + + master_auth { + username = var.basic_auth_username + password = var.basic_auth_password + + client_certificate_config { + issue_client_certificate = var.issue_client_certificate + } + } + + addons_config { + http_load_balancing { + disabled = ! var.http_load_balancing + } + + horizontal_pod_autoscaling { + disabled = ! var.horizontal_pod_autoscaling + } + + kubernetes_dashboard { + disabled = ! var.kubernetes_dashboard + } + + network_policy_config { + disabled = ! var.network_policy + } + + istio_config { + disabled = ! var.istio + } + + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } + } + + ip_allocation_policy { + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services + } + + maintenance_policy { + daily_maintenance_window { + start_time = var.maintenance_start_time + } + } + + lifecycle { + ignore_changes = [node_pool, initial_node_count] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + initial_node_count = var.initial_node_count + + node_config { + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + } + } + + private_cluster_config { + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block + } + + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } + + dynamic "workload_identity_config" { + for_each = local.cluster_workload_identity_config + + content { + identity_namespace = workload_identity_config.value.identity_namespace + } + } + + dynamic "authenticator_groups_config" { + for_each = local.cluster_authenticator_security_group + content { + security_group = authenticator_groups_config.value.security_group + } + } +} + +/****************************************** + Create Container Cluster node pools + *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". 
schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +resource "google_container_node_pool" "pools" { + provider = google-beta + count = length(var.node_pools) + name = random_id.name.*.hex[count.index] + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : [] + content { + min_node_count = lookup(autoscaling.value, "min_count", 1) + max_node_count = lookup(autoscaling.value, "max_count", 100) + } + } + + management { + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade) + } + + node_config { + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] + } + ] + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } + } + + lifecycle { + ignore_changes = [initial_node_count] + create_before_destroy = true + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } +} + +resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 0 : 1 + + provisioner "local-exec" { + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + provisioner "local-exec" { + when = destroy + command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" + } + + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} diff --git a/modules/beta-private-cluster-update-variant/dns.tf b/modules/beta-private-cluster-update-variant/dns.tf new file mode 100644 index 0000000000..8a581ff68e --- /dev/null +++ b/modules/beta-private-cluster-update-variant/dns.tf @@ -0,0 +1,120 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Delete default kube-dns configmap + *****************************************/ +resource "null_resource" "delete_default_kube_dns_configmap" { + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 + + provisioner "local-exec" { + command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + } + + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + ] +} + +/****************************************** + Create kube-dns confimap + *****************************************/ +resource "kubernetes_config_map" "kube-dns" { + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 + + metadata { + name = "kube-dns" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) + cluster_type = var.regional ? "regional" : "zonal" + // auto upgrade by defaults only for regional cluster as long it has multiple masters versus zonal clusters have only have a single master so upgrades are more dangerous. + default_auto_upgrade = var.regional ? true : false + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + cluster_cloudrun_config = var.cloudrun ? [{ disabled = false }] : [] + + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] + + cluster_authenticator_security_group = var.authenticator_security_group == null ? [] : [{ + security_group = var.authenticator_security_group + }] + + cluster_sandbox_enabled = var.sandbox_enabled ? ["gvisor"] : [] + + + cluster_output_name = google_container_cluster.primary.name + cluster_output_location = google_container_cluster.primary.location + cluster_output_region = google_container_cluster.primary.region + cluster_output_regional_zones = google_container_cluster.primary.node_locations + cluster_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] + cluster_output_zones = local.cluster_output_regional_zones + + cluster_output_endpoint = var.deploy_using_private_endpoint ? 
google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.endpoint + + cluster_output_master_auth = concat(google_container_cluster.primary.*.master_auth, []) + cluster_output_master_version = google_container_cluster.primary.master_version + cluster_output_min_master_version = google_container_cluster.primary.min_master_version + cluster_output_logging_service = google_container_cluster.primary.logging_service + cluster_output_monitoring_service = google_container_cluster.primary.monitoring_service + cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled + cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled + cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled + cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled + + # BETA features + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false + cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false + + # /BETA features + + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) + cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) + + cluster_master_auth_list_layer1 = local.cluster_output_master_auth + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] + # cluster locals + cluster_name = local.cluster_output_name + cluster_location = local.cluster_output_location + cluster_region = local.cluster_output_region + cluster_zones = sort(local.cluster_output_zones) + cluster_endpoint = local.cluster_output_endpoint + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_output_master_version + cluster_min_master_version = local.cluster_output_min_master_version + cluster_logging_service = local.cluster_output_logging_service + cluster_monitoring_service = local.cluster_output_monitoring_service + cluster_node_pools_names = local.cluster_output_node_pools_names + cluster_node_pools_versions = local.cluster_output_node_pools_versions + cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled + cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled + cluster_kubernetes_dashboard_enabled = ! 
local.cluster_output_kubernetes_dashboard_enabled + # BETA features + cluster_istio_enabled = ! local.cluster_output_istio_disabled + cluster_cloudrun_enabled = var.cloudrun + cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled + cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled + cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ + identity_namespace = var.identity_namespace + }] + # /BETA features +} + +/****************************************** + Get available container engine versions + *****************************************/ +data "google_container_engine_versions" "region" { + location = local.location + project = var.project_id +} + +data "google_container_engine_versions" "zone" { + // Work around to prevent a lack of zone declaration from causing regional cluster creation from erroring out due to error + // + // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. + // + location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] + project = var.project_id +} diff --git a/modules/beta-private-cluster-update-variant/masq.tf b/modules/beta-private-cluster-update-variant/masq.tf new file mode 100644 index 0000000000..b6e411fc42 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/masq.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create ip-masq-agent confimap + *****************************************/ +resource "kubernetes_config_map" "ip-masq-agent" { + count = var.configure_ip_masq ? 1 : 0 + + metadata { + name = "ip-masq-agent" + namespace = "kube-system" + + labels = { + maintained_by = "terraform" + } + } + + data = { + config = <&2 echo "3 arguments expected. Exiting." 
+ exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh b/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh new file mode 100755 index 0000000000..e92300bcb5 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/scripts/kubectl_wrapper.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 3 ]; then + >&2 echo "Not all expected arguments set." + exit 1 +fi + +HOST=$1 +TOKEN=$2 +CA_CERTIFICATE=$3 + +shift 3 + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +# shellcheck disable=SC1117 +base64 --help | grep "\--decode" && B64_ARG="--decode" || B64_ARG="-d" +echo "${CA_CERTIFICATE}" | base64 ${B64_ARG} > "${TMPDIR}/ca_certificate" + +kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null +rm -f "${TMPDIR}/ca_certificate" +kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null +kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null +kubectl config use-context kubectl-wrapper 1>/dev/null +kubectl version 1>/dev/null + +"$@" diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh new file mode 100755 index 0000000000..b7019eace1 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# shellcheck disable=SC2034 +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" +fi + +PROJECT=$1 +CLUSTER_NAME=$2 +gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" +jq_query=".[] | select(.name==\"$CLUSTER_NAME\") | .status" + +echo "Waiting for cluster $2 in project $1 to reconcile..." + +current_status=$($gcloud_command | jq -r "$jq_query") + +while [[ "${current_status}" == "RECONCILING" ]]; do + printf "." + sleep 5 + current_status=$($gcloud_command | jq -r "$jq_query") +done + +echo "Cluster is ready!" diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf new file mode 100644 index 0000000000..07461351c1 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -0,0 +1,424 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +variable "project_id" { + type = string + description = "The project ID to host the cluster in (required)" +} + +variable "name" { + type = string + description = "The name of the cluster (required)" +} + +variable "description" { + type = string + description = "The description of the cluster" + default = "" +} + +variable "regional" { + type = bool + description = "Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!)" + default = true +} + +variable "region" { + type = string + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null +} + +variable "zones" { + type = list(string) + description = "The zones to host the cluster in (optional if regional cluster / required if zonal)" + default = [] +} + +variable "network" { + type = string + description = "The VPC network to host the cluster in (required)" +} + +variable "network_project_id" { + type = string + description = "The project ID of the shared VPC's host (for shared vpc support)" + default = "" +} + +variable "subnetwork" { + type = string + description = "The subnetwork to host the cluster in (required)" +} + +variable "kubernetes_version" { + type = string + description = "The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region." + default = "latest" +} + +variable "node_version" { + type = string + description = "The Kubernetes version of the node pools. 
Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set to the same version as the master at cluster creation."
+  default     = ""
+}
+
+variable "master_authorized_networks_config" {
+  type        = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) }))
+  description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)."
+  default     = []
+}
+
+variable "horizontal_pod_autoscaling" {
+  type        = bool
+  description = "Enable horizontal pod autoscaling addon"
+  default     = true
+}
+
+variable "http_load_balancing" {
+  type        = bool
+  description = "Enable HTTP load balancing addon"
+  default     = true
+}
+
+variable "kubernetes_dashboard" {
+  type        = bool
+  description = "Enable kubernetes dashboard addon"
+  default     = false
+}
+
+variable "network_policy" {
+  type        = bool
+  description = "Enable network policy addon"
+  default     = false
+}
+
+variable "network_policy_provider" {
+  type        = string
+  description = "The network policy provider."
+  default     = "CALICO"
+}
+
+variable "maintenance_start_time" {
+  type        = string
+  description = "Time window specified for daily maintenance operations in RFC3339 format"
+  default     = "05:00"
+}
+
+variable "ip_range_pods" {
+  type        = string
+  description = "The _name_ of the secondary subnet ip range to use for pods"
+}
+
+variable "ip_range_services" {
+  type        = string
+  description = "The _name_ of the secondary subnet range to use for services"
+}
+
+variable "initial_node_count" {
+  type        = number
+  description = "The number of nodes to create in this cluster's default node pool."
+  default     = 0
+}
+
+variable "remove_default_node_pool" {
+  type        = bool
+  description = "Remove default node pool while setting up the cluster"
+  default     = false
+}
+
+variable "disable_legacy_metadata_endpoints" {
+  type        = bool
+  description = "Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated."
+  default     = true
+}
+
+variable "node_pools" {
+  type        = list(map(string))
+  description = "List of maps containing node pools"
+
+  default = [
+    {
+      name = "default-node-pool"
+    },
+  ]
+}
+
+variable "node_pools_labels" {
+  type        = map(map(string))
+  description = "Map of maps containing node labels by node-pool name"
+
+  default = {
+    all               = {}
+    default-node-pool = {}
+  }
+}
+
+variable "node_pools_metadata" {
+  type        = map(map(string))
+  description = "Map of maps containing node metadata by node-pool name"
+
+  default = {
+    all               = {}
+    default-node-pool = {}
+  }
+}
+
+variable "node_pools_taints" {
+  type        = map(list(object({ key = string, value = string, effect = string })))
+  description = "Map of lists containing node taints by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "node_pools_tags" {
+  type        = map(list(string))
+  description = "Map of lists containing node network tags by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "node_pools_oauth_scopes" {
+  type        = map(list(string))
+  description = "Map of lists containing node oauth scopes by node-pool name"
+
+  default = {
+    all               = ["https://www.googleapis.com/auth/cloud-platform"]
+    default-node-pool = []
+  }
+}
+
+variable "stub_domains" {
+  type        = map(list(string))
+  description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server"
+  default     = {}
+}
+
+variable "upstream_nameservers" {
+  type        = list(string)
+  description = "If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf"
+  default     = []
+}
+
+variable "non_masquerade_cidrs" {
+  type        = list(string)
+  description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading."
+  default     = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
+}
+
+variable "ip_masq_resync_interval" {
+  type        = string
+  description = "The interval at which the agent attempts to sync its ConfigMap file from the disk."
+  default     = "60s"
+}
+
+variable "ip_masq_link_local" {
+  type        = bool
+  description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)."
+  default     = false
+}
+
+variable "configure_ip_masq" {
+  description = "Enables the installation of ip masquerading, which is usually no longer required when using aliased IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server."
+  default     = false
+}
+
+variable "logging_service" {
+  type        = string
+  description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none"
+  default     = "logging.googleapis.com"
+}
+
+variable "monitoring_service" {
+  type        = string
+  description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta), and none."
+  default     = "monitoring.googleapis.com"
+}
+
+variable "create_service_account" {
+  type        = bool
+  description = "Defines whether the service account specified to run nodes should be created."
+ default = true +} + +variable "grant_registry_access" { + type = bool + description = "Grants created cluster-specific service account storage.objectViewer role." + default = false +} + +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + +variable "service_account" { + type = string + description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." + default = "" +} + +variable "basic_auth_username" { + type = string + description = "The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration." + default = "" +} + +variable "basic_auth_password" { + type = string + description = "The password to be used with Basic Authentication." + default = "" +} + +variable "issue_client_certificate" { + type = bool + description = "Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive!" + default = false +} + +variable "cluster_ipv4_cidr" { + default = "" + description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." +} + +variable "cluster_resource_labels" { + type = map(string) + description = "The GCE resource labels (a map of key/value pairs) to be applied to the cluster" + default = {} +} + +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} + +variable "deploy_using_private_endpoint" { + type = bool + description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." + default = false +} + +variable "enable_private_endpoint" { + type = bool + description = "(Beta) Whether the master's internal IP address is used as the cluster endpoint" + default = false +} + +variable "enable_private_nodes" { + type = bool + description = "(Beta) Whether nodes have internal IP addresses only" + default = false +} + +variable "master_ipv4_cidr_block" { + type = string + description = "(Beta) The IP range in CIDR notation to use for the hosted master network" + default = "10.0.0.0/28" +} + +variable "istio" { + description = "(Beta) Enable Istio addon" + default = false +} + +variable "default_max_pods_per_node" { + description = "The maximum number of pods to schedule per node" + default = 110 +} + +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." 
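+  # Illustrative example (not part of the module's defaults): to enable
+  # application-layer secrets encryption, pass a value shaped like the
+  # following, where the key name is a placeholder for a real CloudKMS key:
+  #
+  #   database_encryption = [{
+  #     state    = "ENCRYPTED"
+  #     key_name = "projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY"
+  #   }]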
+ type = list(object({ state = string, key_name = string })) + default = [{ + state = "DECRYPTED" + key_name = "" + }] +} + +variable "cloudrun" { + description = "(Beta) Enable CloudRun addon" + default = false +} + +variable "enable_binary_authorization" { + description = "Enable BinAuthZ Admission controller" + default = false +} + +variable "pod_security_policy_config" { + description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ + "enabled" = false + }] +} + +variable "resource_usage_export_dataset_id" { + type = string + description = "The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic." + default = "" +} + +variable "node_metadata" { + description = "Specifies how node metadata is exposed to the workload running on the node" + default = "SECURE" + type = string +} + +variable "sandbox_enabled" { + type = bool + description = "(Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it)." + default = false +} + +variable "enable_intranode_visibility" { + type = bool + description = "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network" + default = false +} + +variable "enable_vertical_pod_autoscaling" { + type = bool + description = "Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it" + default = false +} + +variable "identity_namespace" { + description = "Workload Identity namespace" + type = string + default = "" +} + +variable "authenticator_security_group" { + type = string + description = "The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com" + default = null +} + +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." + default = null +} diff --git a/test/boilerplate/boilerplate.tf.txt b/modules/beta-private-cluster-update-variant/versions.tf similarity index 92% rename from test/boilerplate/boilerplate.tf.txt rename to modules/beta-private-cluster-update-variant/versions.tf index cfccff84ca..832ec1df39 100644 --- a/test/boilerplate/boilerplate.tf.txt +++ b/modules/beta-private-cluster-update-variant/versions.tf @@ -13,3 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 74bd64c022..daf98949a1 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -25,7 +25,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster" + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-private-cluster" project_id = "" name = "gke-test-1" region = "us-central1" @@ -177,7 +177,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | @@ -188,12 +188,15 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"null"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. 
The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | @@ -223,6 +226,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_versions | List of node pools versions | | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | +| release\_channel | The release channel of this cluster | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | @@ -258,6 +262,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: @@ -278,141 +285,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. 
Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. 
Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 901ae674eb..d0cc6d7c20 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -41,6 +41,14 @@ resource "google_container_cluster" "primary" { } } + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version @@ -135,7 +143,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -158,14 +166,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } } } @@ -243,22 +243,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -277,8 +269,8 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -314,6 +306,14 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } } lifecycle { @@ -328,6 +328,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index b240a23e65..8a581ff68e 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index ad6116e7ef..2de95c063d 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version @@ -43,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 @@ -93,10 +96,10 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features @@ -124,7 +127,7 @@ locals { cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/modules/beta-private-cluster/networks.tf b/modules/beta-private-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-private-cluster/networks.tf +++ b/modules/beta-private-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index 4153960069..956c8c2d5d 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -149,3 +149,7 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} diff --git a/modules/beta-private-cluster/sa.tf b/modules/beta-private-cluster/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-private-cluster/sa.tf +++ b/modules/beta-private-cluster/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 
1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-private-cluster/scripts/wait-for-cluster.sh b/modules/beta-private-cluster/scripts/wait-for-cluster.sh index 6ff3253d58..b7019eace1 100755 --- a/modules/beta-private-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,11 @@ set -e +# shellcheck disable=SC2034 +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 975fe7a173..07461351c1 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -267,6 +268,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." @@ -302,6 +309,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} variable "deploy_using_private_endpoint" { type = bool @@ -371,7 +383,8 @@ variable "resource_usage_export_dataset_id" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "sandbox_enabled" { @@ -404,3 +417,8 @@ variable "authenticator_security_group" { default = null } +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." 
+ default = null +} diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 316f46d43e..98f4526d9f 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -23,7 +23,7 @@ There are multiple examples included in the [examples](./examples/) folder but s ```hcl module "gke" { - source = "terraform-google-modules/kubernetes-engine/google" + source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster" project_id = "" name = "gke-test-1" region = "us-central1" @@ -168,7 +168,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy | Enable network policy addon | bool | `"false"` | no | | network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"SECURE"` | no | | node\_pools | List of maps containing node pools | list(map(string)) | `` | no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | | node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | @@ -179,12 +179,15 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | +| release\_channel | (Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`. | string | `"null"` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | resource\_usage\_export\_dataset\_id | The dataset id for which network egress metering for this cluster will be enabled. If enabled, a daemonset will be created in the cluster to meter network egress traffic. | string | `""` | no | | sandbox\_enabled | (Beta) Enable GKE Sandbox (Do not forget to set `image_type` = `COS_CONTAINERD` and `node_version` = `1.12.7-gke.17` or later to use it). | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. 
The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | @@ -214,6 +217,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_pools\_versions | List of node pools versions | | pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | +| release\_channel | The release channel of this cluster | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | | vertical\_pod\_autoscaling\_enabled | Whether veritical pod autoscaling is enabled | @@ -249,6 +253,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: @@ -269,141 +276,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. 
Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. 
Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 1c8561344a..304fcc8df3 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -41,6 +41,14 @@ resource "google_container_cluster" "primary" { } } + dynamic "release_channel" { + for_each = local.release_channel + + content { + channel = release_channel.value.channel + } + } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version @@ -135,7 +143,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -158,14 +166,6 @@ resource "google_container_cluster" "primary" { node_metadata = workload_metadata_config.value.node_metadata } } - - dynamic "sandbox_config" { - for_each = local.cluster_sandbox_enabled - - content { - sandbox_type = sandbox_config.value - } - } } } @@ -238,22 +238,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -272,8 +264,8 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -309,6 +301,14 @@ resource "google_container_node_pool" "pools" { node_metadata = workload_metadata_config.value.node_metadata } } + + dynamic "sandbox_config" { + for_each = local.cluster_sandbox_enabled + + content { + sandbox_type = sandbox_config.value + } + } } lifecycle { @@ -323,6 +323,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index b240a23e65..8a581ff68e 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index c956463414..9668b6f1ea 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google-beta project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version @@ -43,6 +44,8 @@ locals { node_version_zonal = var.node_version != "" && ! var.regional ? var.node_version : local.master_version_zonal master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + release_channel = var.release_channel != null ? 
[{ channel : var.release_channel }] : [] + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 @@ -93,10 +96,10 @@ locals { cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features - cluster_output_istio_enabled = google_container_cluster.primary.addons_config.0.istio_config.0.disabled - cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config.0.enabled + cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false + cluster_output_pod_security_policy_enabled = google_container_cluster.primary.pod_security_policy_config != null && length(google_container_cluster.primary.pod_security_policy_config) == 1 ? google_container_cluster.primary.pod_security_policy_config.0.enabled : false cluster_output_intranode_visbility_enabled = google_container_cluster.primary.enable_intranode_visibility - cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling.0.enabled + cluster_output_vertical_pod_autoscaling_enabled = google_container_cluster.primary.vertical_pod_autoscaling != null && length(google_container_cluster.primary.vertical_pod_autoscaling) == 1 ? google_container_cluster.primary.vertical_pod_autoscaling.0.enabled : false # /BETA features @@ -124,7 +127,7 @@ locals { cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features - cluster_istio_enabled = ! local.cluster_output_istio_enabled + cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled diff --git a/modules/beta-public-cluster/networks.tf b/modules/beta-public-cluster/networks.tf index 14ea500e03..2456654130 100644 --- a/modules/beta-public-cluster/networks.tf +++ b/modules/beta-public-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google-beta name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index 4153960069..956c8c2d5d 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -149,3 +149,7 @@ output "vertical_pod_autoscaling_enabled" { value = local.cluster_vertical_pod_autoscaling_enabled } +output "release_channel" { + description = "The release channel of this cluster" + value = var.release_channel +} diff --git a/modules/beta-public-cluster/sa.tf b/modules/beta-public-cluster/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/beta-public-cluster/sa.tf +++ b/modules/beta-public-cluster/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 
1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/beta-public-cluster/scripts/wait-for-cluster.sh b/modules/beta-public-cluster/scripts/wait-for-cluster.sh index 6ff3253d58..b7019eace1 100755 --- a/modules/beta-public-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-public-cluster/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,11 @@ set -e +# shellcheck disable=SC2034 +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 850f38ea83..b41e5591b9 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -267,6 +268,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." @@ -302,6 +309,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} variable "istio" { description = "(Beta) Enable Istio addon" @@ -347,7 +359,8 @@ variable "resource_usage_export_dataset_id" { variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" - default = "UNSPECIFIED" + default = "SECURE" + type = string } variable "sandbox_enabled" { @@ -380,3 +393,8 @@ variable "authenticator_security_group" { default = null } +variable "release_channel" { + type = string + description = "(Beta) The release channel of this cluster. Accepted values are `UNSPECIFIED`, `RAPID`, `REGULAR` and `STABLE`. Defaults to `UNSPECIFIED`." + default = null +} diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md new file mode 100644 index 0000000000..fa9cdb8852 --- /dev/null +++ b/modules/private-cluster-update-variant/README.md @@ -0,0 +1,270 @@ +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. 
This particular submodule creates a [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters).
+The resources/services/activations/deletions that this module will create/trigger are:
+- Create a GKE cluster with the provided addons
+- Create GKE Node Pool(s) with provided configuration and attach to cluster
+- Replace the default kube-dns configmap if `stub_domains` are provided
+- Activate network policy if `network_policy` is true
+- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true
+
+Submodules are provided for creating private clusters, beta private clusters, and beta public clusters as well. Beta submodules allow for the use of various GKE beta features. See the modules directory for the various submodules.
+
+**Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master.
+
+
+## Compatibility
+
+This module is meant for use with Terraform 0.12. If you haven't
+[upgraded][terraform-0.12-upgrade] and need a Terraform
+0.11.x-compatible version of this module, the last released version
+intended for Terraform 0.11.x is [3.0.0].
+
+## Usage
+There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows:
+
+```hcl
+module "gke" {
+  source                     = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-update-variant"
+  project_id                 = ""
+  name                       = "gke-test-1"
+  region                     = "us-central1"
+  zones                      = ["us-central1-a", "us-central1-b", "us-central1-f"]
+  network                    = "vpc-01"
+  subnetwork                 = "us-central1-01"
+  ip_range_pods              = "us-central1-01-gke-01-pods"
+  ip_range_services          = "us-central1-01-gke-01-services"
+  http_load_balancing        = false
+  horizontal_pod_autoscaling = true
+  kubernetes_dashboard       = true
+  network_policy             = true
+  enable_private_endpoint    = true
+  enable_private_nodes       = true
+  master_ipv4_cidr_block     = "10.0.0.0/28"
+
+  node_pools = [
+    {
+      name               = "default-node-pool"
+      machine_type       = "n1-standard-2"
+      min_count          = 1
+      max_count          = 100
+      disk_size_gb       = 100
+      disk_type          = "pd-standard"
+      image_type         = "COS"
+      auto_repair        = true
+      auto_upgrade       = true
+      service_account    = "project-service-account@.iam.gserviceaccount.com"
+      preemptible        = false
+      initial_node_count = 80
+    },
+  ]
+
+  node_pools_oauth_scopes = {
+    all = []
+
+    default-node-pool = [
+      "https://www.googleapis.com/auth/cloud-platform",
+    ]
+  }
+
+  node_pools_labels = {
+    all = {}
+
+    default-node-pool = {
+      default-node-pool = true
+    }
+  }
+
+  node_pools_metadata = {
+    all = {}
+
+    default-node-pool = {
+      node-pool-metadata-custom-value = "my-node-pool"
+    }
+  }
+
+  node_pools_taints = {
+    all = []
+
+    default-node-pool = [
+      {
+        key    = "default-node-pool"
+        value  = true
+        effect = "PREFER_NO_SCHEDULE"
+      },
+    ]
+  }
+
+  node_pools_tags = {
+    all = []
+
+    default-node-pool = [
+      "default-node-pool",
+    ]
+  }
+}
+```
+
+
+Then perform the following commands on the root folder:
+
+- `terraform init` to get the plugins
+- `terraform plan` to see the infrastructure plan
+- `terraform apply` to apply the infrastructure build
+- `terraform destroy` to destroy the built infrastructure
+
+## Upgrade to v3.0.0
+
+v3.0.0 is a breaking release. Refer to the
+[Upgrading to v3.0 guide][upgrading-to-v3.0] for details.
+
+## Upgrade to v2.0.0
+
+v2.0.0 is a breaking release. Refer to the
+[Upgrading to v2.0 guide][upgrading-to-v2.0] for details.
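+
+Separate from the upgrade notes, this variant also accepts the `registry_project_id` and `skip_provisioners` inputs documented in the Inputs table below. The following minimal sketch, with placeholder project and registry IDs (not values from this repository), shows one way they might be combined with `grant_registry_access`; it is illustrative only:
+
+```hcl
+module "gke" {
+  source            = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-update-variant"
+  project_id        = "my-cluster-project" # placeholder
+  name              = "gke-test-1"
+  region            = "us-central1"
+  network           = "vpc-01"
+  subnetwork        = "us-central1-01"
+  ip_range_pods     = "us-central1-01-gke-01-pods"
+  ip_range_services = "us-central1-01-gke-01-services"
+
+  # Grant the cluster service account storage.objectViewer on a separate
+  # registry project instead of the cluster project (placeholder ID).
+  grant_registry_access = true
+  registry_project_id   = "my-registry-project" # placeholder
+
+  # Skip all local-exec provisioners, e.g. when kubectl/gcloud cannot reach
+  # the cluster from where Terraform runs. Note that this disables the
+  # stub_domains and upstream_nameservers functionality.
+  skip_provisioners = true
+}
+```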
+
+## Upgrade to v1.0.0
+
+Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths.
+
+In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster.
+
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|:----:|:-----:|:-----:|
+| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no |
+| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no |
+| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no |
+| cluster\_resource\_labels | The GCE resource labels (a map of key/value pairs) to be applied to the cluster | map(string) | `` | no |
+| configure\_ip\_masq | Enables the installation of IP masquerading, which is usually no longer required when using aliased IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no |
+| create\_service\_account | Defines if service account specified to run nodes should be created. | bool | `"true"` | no |
+| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no |
+| description | The description of the cluster | string | `""` | no |
+| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no |
+| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no |
+| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no |
+| grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no |
+| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no |
+| http\_load\_balancing | Enable HTTP load balancer addon | bool | `"true"` | no |
+| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no |
+| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no |
+| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no |
+| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes |
+| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes |
+| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no |
+| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no |
+| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no |
+| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no |
+| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no |
+| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no |
+| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no |
+| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no |
+| name | The name of the cluster (required) | string | n/a | yes |
+| network | The VPC network to host the cluster in (required) | string | n/a | yes |
+| network\_policy | Enable network policy addon | bool | `"false"` | no |
+| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no |
+| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no |
+| node\_pools | List of maps containing node pools | list(map(string)) | `` | no |
+| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no |
+| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no |
+| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no |
+| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no |
+| node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no |
+| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. 
| list(string) | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + + + +## Requirements + +Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: + +1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. +2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). +3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. +4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. 
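+
+The two APIs above can be activated by hand, or with Terraform. A minimal sketch using the Google provider's `google_project_service` resource follows; the project ID is a placeholder, and `for_each` on resources requires Terraform 0.12.6 or later:
+
+```hcl
+# Enable the APIs this module depends on in the cluster project.
+resource "google_project_service" "required" {
+  for_each = toset([
+    "compute.googleapis.com",
+    "container.googleapis.com",
+  ])
+
+  project = "my-cluster-project" # placeholder
+  service = each.value
+
+  # Keep the APIs enabled even if this resource is destroyed.
+  disable_on_destroy = false
+}
+```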
+ +The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. + +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP][terraform-provider-google] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. + + +[upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster-update-variant/auth.tf b/modules/private-cluster-update-variant/auth.tf new file mode 100644 index 0000000000..48e7cc6a5f --- /dev/null +++ b/modules/private-cluster-update-variant/auth.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" { + provider = google +} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) +} diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf new file mode 100644 index 0000000000..615fe84bcc --- /dev/null +++ b/modules/private-cluster-update-variant/cluster.tf @@ -0,0 +1,316 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +/****************************************** + Create Container Cluster + *****************************************/ +resource "google_container_cluster" "primary" { + provider = google + + name = var.name + description = var.description + project = var.project_id + resource_labels = var.cluster_resource_labels + + location = local.location + node_locations = local.node_locations + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.master_version + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } + + master_auth { + username = var.basic_auth_username + password = var.basic_auth_password + + client_certificate_config { + issue_client_certificate = var.issue_client_certificate + } + } + + addons_config { + http_load_balancing { + disabled = ! var.http_load_balancing + } + + horizontal_pod_autoscaling { + disabled = ! var.horizontal_pod_autoscaling + } + + kubernetes_dashboard { + disabled = ! var.kubernetes_dashboard + } + + network_policy_config { + disabled = ! 
var.network_policy + } + } + + ip_allocation_policy { + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services + } + + maintenance_policy { + daily_maintenance_window { + start_time = var.maintenance_start_time + } + } + + lifecycle { + ignore_changes = [node_pool, initial_node_count] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + initial_node_count = var.initial_node_count + + node_config { + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + } + } + + private_cluster_config { + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block + } + + remove_default_node_pool = var.remove_default_node_pool +} + +/****************************************** + Create Container Cluster node pools + *****************************************/ +locals { + force_node_pool_recreation_resources = [ + "disk_size_gb", + "disk_type", + "accelerator_count", + "accelerator_type", + "local_ssd_count", + "machine_type", + "preemptible", + "service_account", + ] +} + +# This keepers list is based on the terraform google provider schemaNodeConfig +# resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at +# https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22 +resource "random_id" "name" { + count = length(var.node_pools) + byte_length = 2 + prefix = format("%s-", lookup(var.node_pools[count.index], "name")) + keepers = merge( + zipmap( + local.force_node_pool_recreation_resources, + [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")] + ), + { + labels = join(",", + sort( + concat( + keys(var.node_pools_labels["all"]), + values(var.node_pools_labels["all"]), + keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), + values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + metadata = join(",", + sort( + concat( + keys(var.node_pools_metadata["all"]), + values(var.node_pools_metadata["all"]), + keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + ) + ) + ) + }, + { + oauth_scopes = join(",", + sort( + concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + ) + ) + }, + { + tags = join(",", + sort( + concat( + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]] + ) + ) + ) + } + ) +} + +resource "google_container_node_pool" "pools" { + provider = google + count = length(var.node_pools) + name = random_id.name.*.hex[count.index] + project = var.project_id + location = local.location + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) + + node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "min_count", 1) + + dynamic "autoscaling" { + for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? 
[var.node_pools[count.index]] : []
+    content {
+      min_node_count = lookup(autoscaling.value, "min_count", 1)
+      max_node_count = lookup(autoscaling.value, "max_count", 100)
+    }
+  }
+
+  management {
+    auto_repair  = lookup(var.node_pools[count.index], "auto_repair", true)
+    auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade)
+  }
+
+  node_config {
+    image_type   = lookup(var.node_pools[count.index], "image_type", "COS")
+    machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
+    labels = merge(
+      lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {},
+      lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {},
+      var.node_pools_labels["all"],
+      var.node_pools_labels[var.node_pools[count.index]["name"]],
+    )
+    metadata = merge(
+      lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {},
+      lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {},
+      var.node_pools_metadata["all"],
+      var.node_pools_metadata[var.node_pools[count.index]["name"]],
+      {
+        "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints
+      },
+    )
+    tags = concat(
+      lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [],
+      lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [],
+      var.node_pools_tags["all"],
+      var.node_pools_tags[var.node_pools[count.index]["name"]],
+    )
+
+    disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100)
+    disk_type    = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
+    service_account = lookup(
+      var.node_pools[count.index],
+      "service_account",
+      local.service_account,
+    )
+    preemptible = lookup(var.node_pools[count.index], "preemptible", false)
+
+    oauth_scopes = concat(
+      var.node_pools_oauth_scopes["all"],
+      var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]],
+    )
+
+    guest_accelerator = [
+      for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{
+        type  = lookup(var.node_pools[count.index], "accelerator_type", "")
+        count = lookup(var.node_pools[count.index], "accelerator_count", 0)
+      }] : [] : {
+        type  = guest_accelerator["type"]
+        count = guest_accelerator["count"]
+      }
+    ]
+  }
+
+  lifecycle {
+    ignore_changes        = [initial_node_count]
+    create_before_destroy = true
+  }
+
+  timeouts {
+    create = "30m"
+    update = "30m"
+    delete = "30m"
+  }
+}
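The lookups above give each map in `node_pools` a flat, string-valued schema. A caller-side sketch of one pool entry that exercises the autoscaling, accelerator, and upgrade keys (all names and sizes below are illustrative, not part of this change):

```
node_pools = [
  {
    name              = "gpu-pool"          # hypothetical pool name
    machine_type      = "n1-standard-4"
    min_count         = 1                   # becomes min_node_count in the autoscaling block
    max_count         = 10                  # becomes max_node_count
    accelerator_type  = "nvidia-tesla-k80"
    accelerator_count = 1                   # a non-zero count populates guest_accelerator
    auto_upgrade      = true                # overrides local.default_auto_upgrade
  },
]
```

Setting `autoscaling = false` on an entry pins `node_count` to `min_count` instead, per the ternary above.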
+resource "null_resource" "wait_for_cluster" {
+  count = var.skip_provisioners ? 0 : 1
+
+  provisioner "local-exec" {
+    command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}"
+  }
+
+  provisioner "local-exec" {
+    when    = destroy
+    command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}"
+  }
+
+  depends_on = [
+    google_container_cluster.primary,
+    google_container_node_pool.pools,
+  ]
+}
diff --git a/modules/private-cluster-update-variant/dns.tf b/modules/private-cluster-update-variant/dns.tf
new file mode 100644
index 0000000000..8a581ff68e
--- /dev/null
+++ b/modules/private-cluster-update-variant/dns.tf
@@ -0,0 +1,120 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file was automatically generated from a template in ./autogen
+
+/******************************************
+  Delete default kube-dns configmap
+ *****************************************/
+resource "null_resource" "delete_default_kube_dns_configmap" {
+  count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0
+
+  provisioner "local-exec" {
+    command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns"
+  }
+
+  depends_on = [
+    data.google_client_config.default,
+    google_container_cluster.primary,
+    google_container_node_pool.pools,
+  ]
+}
+
+/******************************************
+  Create kube-dns configmap
+ *****************************************/
+resource "kubernetes_config_map" "kube-dns" {
+  count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0
+
+  metadata {
+    name      = "kube-dns"
+    namespace = "kube-system"
+
+    labels = {
+      maintained_by = "terraform"
+    }
+  }
+
+  data = {
+    stubDomains = <<EOF
+${jsonencode(var.stub_domains)}
+EOF
+  }
+
+  depends_on = [
+    data.google_client_config.default,
+    google_container_cluster.primary,
+    google_container_node_pool.pools,
+  ]
+}
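Since the ConfigMap payload is rendered straight from `var.stub_domains`, a caller only supplies plain maps and lists. A minimal sketch (module source path, domain, and resolver IPs are illustrative):

```
module "gke" {
  source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster-update-variant"
  # ...required cluster arguments omitted...

  stub_domains = {
    "example.com" = ["8.8.8.8", "8.8.4.4"] # forward example.com to these resolvers
  }
  upstream_nameservers = ["1.1.1.1"]       # replaces resolvers from the node's /etc/resolv.conf
}
```

Note that `skip_provisioners = true` disables the local-exec wrapper these ConfigMaps rely on, which is why the `skip_provisioners` variable description warns that it breaks `stub_domains` and `upstream_nameservers`.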
diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf
new file mode 100644
--- /dev/null
+++ b/modules/private-cluster-update-variant/main.tf
+locals {
+  custom_kube_dns_config      = length(keys(var.stub_domains)) > 0
+  upstream_nameservers_config = length(var.upstream_nameservers) > 0
+  network_project_id          = var.network_project_id != "" ? var.network_project_id : var.project_id
+  zone_count                  = length(var.zones)
+  cluster_type                = var.regional ? "regional" : "zonal"
+  // Auto-upgrade defaults to true only for regional clusters: they have multiple
+  // masters, while zonal clusters have a single master, so upgrades there are riskier.
+  default_auto_upgrade = var.regional ? true : false
+
+  cluster_network_policy = var.network_policy ? [{
+    enabled  = true
+    provider = var.network_policy_provider
+    }] : [{
+    enabled  = false
+    provider = null
+  }]
+
+
+  cluster_output_name           = google_container_cluster.primary.name
+  cluster_output_location       = google_container_cluster.primary.location
+  cluster_output_region         = google_container_cluster.primary.region
+  cluster_output_regional_zones = google_container_cluster.primary.node_locations
+  cluster_output_zonal_zones    = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : []
+  cluster_output_zones          = local.cluster_output_regional_zones
+
+  cluster_output_endpoint = var.deploy_using_private_endpoint ? google_container_cluster.primary.private_cluster_config.0.private_endpoint : google_container_cluster.primary.endpoint
+
+  cluster_output_master_auth                        = concat(google_container_cluster.primary.*.master_auth, [])
+  cluster_output_master_version                     = google_container_cluster.primary.master_version
+  cluster_output_min_master_version                 = google_container_cluster.primary.min_master_version
+  cluster_output_logging_service                    = google_container_cluster.primary.logging_service
+  cluster_output_monitoring_service                 = google_container_cluster.primary.monitoring_service
+  cluster_output_network_policy_enabled             = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled
+  cluster_output_http_load_balancing_enabled        = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled
+  cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled
+  cluster_output_kubernetes_dashboard_enabled       = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled
+
+
+  cluster_output_node_pools_names    = concat(google_container_node_pool.pools.*.name, [""])
+  cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""])
+
+  cluster_master_auth_list_layer1 = local.cluster_output_master_auth
+  cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0]
+  cluster_master_auth_map         = local.cluster_master_auth_list_layer2[0]
+  # cluster locals
+  cluster_name                               = local.cluster_output_name
+  cluster_location                           = local.cluster_output_location
+  cluster_region                             = local.cluster_output_region
+  cluster_zones                              = sort(local.cluster_output_zones)
+  cluster_endpoint                           = local.cluster_output_endpoint
+  cluster_ca_certificate                     = local.cluster_master_auth_map["cluster_ca_certificate"]
+  cluster_master_version                     = local.cluster_output_master_version
+  cluster_min_master_version                 = local.cluster_output_min_master_version
+  cluster_logging_service                    = local.cluster_output_logging_service
+  cluster_monitoring_service                 = local.cluster_output_monitoring_service
+  cluster_node_pools_names                   = local.cluster_output_node_pools_names
+  cluster_node_pools_versions                = local.cluster_output_node_pools_versions
+  cluster_network_policy_enabled             = ! local.cluster_output_network_policy_enabled
+  cluster_http_load_balancing_enabled        = ! local.cluster_output_http_load_balancing_enabled
+  cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled
+  cluster_kubernetes_dashboard_enabled       = ! local.cluster_output_kubernetes_dashboard_enabled
+}
+
+/******************************************
+  Get available container engine versions
+ *****************************************/
+data "google_container_engine_versions" "region" {
+  location = local.location
+  project  = var.project_id
+}
+
+data "google_container_engine_versions" "zone" {
+  // Workaround: when no zones are declared, this zonal lookup would otherwise make
+  // regional cluster creation error out with:
+  //
+  //   data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone.
+  //
+  location = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0]
+  project  = var.project_id
+}
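These data sources are how `kubernetes_version = "latest"` resolves to a concrete version. A caller can use the same data source to pin the module explicitly; a sketch assuming the standard `google_container_engine_versions` attributes (location value illustrative):

```
data "google_container_engine_versions" "current" {
  location = "us-east4"
  project  = var.project_id
}

# Pin the masters to the newest version available at plan time
# instead of relying on the module's "latest" handling.
module "gke" {
  # ...source and other arguments as above...
  kubernetes_version = data.google_container_engine_versions.current.latest_master_version
}
```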
diff --git a/modules/private-cluster-update-variant/masq.tf b/modules/private-cluster-update-variant/masq.tf
new file mode 100644
index 0000000000..b6e411fc42
--- /dev/null
+++ b/modules/private-cluster-update-variant/masq.tf
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file was automatically generated from a template in ./autogen
+
+/******************************************
+  Create ip-masq-agent configmap
+ *****************************************/
+resource "kubernetes_config_map" "ip-masq-agent" {
+  count = var.configure_ip_masq ? 1 : 0
+
+  metadata {
+    name      = "ip-masq-agent"
+    namespace = "kube-system"
+
+    labels = {
+      maintained_by = "terraform"
+    }
+  }
+
+  data = {
+    config = <<EOF
+nonMasqueradeCIDRs:
+${join("\n", formatlist("  - %s", var.non_masquerade_cidrs))}
+resyncInterval: ${var.ip_masq_resync_interval}
+masqLinkLocal: ${var.ip_masq_link_local}
+EOF
+  }
+
+  depends_on = [
+    data.google_client_config.default,
+    google_container_cluster.primary,
+    google_container_node_pool.pools,
+  ]
+}
diff --git a/modules/private-cluster-update-variant/scripts/delete-default-resource.sh b/modules/private-cluster-update-variant/scripts/delete-default-resource.sh
new file mode 100755
--- /dev/null
+++ b/modules/private-cluster-update-variant/scripts/delete-default-resource.sh
+#!/bin/bash
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+if [ "$#" -ne 3 ]; then
+  >&2 echo "3 arguments expected. Exiting."
+  exit 1
+fi
+
+RESOURCE_NAMESPACE=$1
+RESOURCE_TYPE=$2
+RESOURCE_NAME=$3
+
+RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1)
+
+# Delete requested resource
+if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then
+  RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"')
+  if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then
+    echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace"
+  else
+    echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace"
+    kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}"
+  fi
+else
+  echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace"
+fi
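In dns.tf this script is always launched through kubectl_wrapper.sh, which supplies cluster credentials. Run by hand against an already-configured kubectl context, the invocation shape is (arguments mirror the dns.tf provisioner):

```
# <namespace> <resource type> <resource name>
./scripts/delete-default-resource.sh kube-system configmap kube-dns
```

The script only deletes the resource when it is not labeled `maintained_by = "terraform"`, so re-runs are safe.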
diff --git a/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh b/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh
new file mode 100755
index 0000000000..e92300bcb5
--- /dev/null
+++ b/modules/private-cluster-update-variant/scripts/kubectl_wrapper.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+if [ "$#" -lt 3 ]; then
+  >&2 echo "Not all expected arguments set."
+  exit 1
+fi
+
+HOST=$1
+TOKEN=$2
+CA_CERTIFICATE=$3
+
+shift 3
+
+RANDOM_ID="${RANDOM}_${RANDOM}"
+export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}"
+
+function cleanup {
+  rm -rf "${TMPDIR}"
+}
+trap cleanup EXIT
+
+mkdir "${TMPDIR}"
+
+export KUBECONFIG="${TMPDIR}/config"
+
+# shellcheck disable=SC1117
+base64 --help | grep "\--decode" && B64_ARG="--decode" || B64_ARG="-d"
+echo "${CA_CERTIFICATE}" | base64 ${B64_ARG} > "${TMPDIR}/ca_certificate"
+
+kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null
+rm -f "${TMPDIR}/ca_certificate"
+kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null
+kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null
+kubectl config use-context kubectl-wrapper 1>/dev/null
+kubectl version 1>/dev/null
+
+"$@"
diff --git a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh
new file mode 100755
index 0000000000..b7019eace1
--- /dev/null
+++ b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# shellcheck disable=SC2034
+if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
+  export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}"
+fi
+
+PROJECT=$1
+CLUSTER_NAME=$2
+gcloud_command="gcloud container clusters list --project=$PROJECT --format=json"
+jq_query=".[] | select(.name==\"$CLUSTER_NAME\") | .status"
+
+echo "Waiting for cluster $2 in project $1 to reconcile..."
+
+current_status=$($gcloud_command | jq -r "$jq_query")
+
+while [[ "${current_status}" == "RECONCILING" ]]; do
+  printf "."
+  sleep 5
+  current_status=$($gcloud_command | jq -r "$jq_query")
+done
+
+echo "Cluster is ready!"
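Both helper scripts are normally driven by the `wait_for_cluster` null_resource, but they can be exercised by hand. A sketch (project, cluster, and key path are illustrative; `gcloud` and `jq` must be on PATH):

```
# Optional: point gcloud at the same service-account key Terraform uses.
export GOOGLE_APPLICATION_CREDENTIALS="${HOME}/credentials.json"

# Polls every 5 seconds while the cluster status is RECONCILING.
./scripts/wait-for-cluster.sh my-project my-cluster
```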
diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf
new file mode 100644
index 0000000000..508a4f1b96
--- /dev/null
+++ b/modules/private-cluster-update-variant/variables.tf
@@ -0,0 +1,330 @@
+/**
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file was automatically generated from a template in ./autogen
+
+variable "project_id" {
+  type        = string
+  description = "The project ID to host the cluster in (required)"
+}
+
+variable "name" {
+  type        = string
+  description = "The name of the cluster (required)"
+}
+
+variable "description" {
+  type        = string
+  description = "The description of the cluster"
+  default     = ""
+}
+
+variable "regional" {
+  type        = bool
+  description = "Whether this is a regional cluster (zonal cluster if set to false; WARNING: changing this after cluster creation is destructive!)"
+  default     = true
+}
+
+variable "region" {
+  type        = string
+  description = "The region to host the cluster in (optional if zonal cluster / required if regional)"
+  default     = null
+}
+
+variable "zones" {
+  type        = list(string)
+  description = "The zones to host the cluster in (optional if regional cluster / required if zonal)"
+  default     = []
+}
+
+variable "network" {
+  type        = string
+  description = "The VPC network to host the cluster in (required)"
+}
+
+variable "network_project_id" {
+  type        = string
+  description = "The project ID of the shared VPC's host (for shared vpc support)"
+  default     = ""
+}
+
+variable "subnetwork" {
+  type        = string
+  description = "The subnetwork to host the cluster in (required)"
+}
+
+variable "kubernetes_version" {
+  type        = string
+  description = "The Kubernetes version of the masters. If set to 'latest', it will pull the latest available version in the selected region."
+  default     = "latest"
+}
+
+variable "node_version" {
+  type        = string
+  description = "The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set to the same version as the master at cluster creation."
+  default     = ""
+}
+
+variable "master_authorized_networks_config" {
+  type        = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) }))
+  description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)."
+  default     = []
+}
+
+variable "horizontal_pod_autoscaling" {
+  type        = bool
+  description = "Enable horizontal pod autoscaling addon"
+  default     = true
+}
+
+variable "http_load_balancing" {
+  type        = bool
+  description = "Enable HTTP load balancer addon"
+  default     = true
+}
+
+variable "kubernetes_dashboard" {
+  type        = bool
+  description = "Enable Kubernetes dashboard addon"
+  default     = false
+}
+
+variable "network_policy" {
+  type        = bool
+  description = "Enable network policy addon"
+  default     = false
+}
+
+variable "network_policy_provider" {
+  type        = string
+  description = "The network policy provider."
+  default     = "CALICO"
+}
+
+variable "maintenance_start_time" {
+  type        = string
+  description = "Time window specified for daily maintenance operations in RFC3339 format"
+  default     = "05:00"
+}
+
+variable "ip_range_pods" {
+  type        = string
+  description = "The _name_ of the secondary subnet ip range to use for pods"
+}
+
+variable "ip_range_services" {
+  type        = string
+  description = "The _name_ of the secondary subnet range to use for services"
+}
+
+variable "initial_node_count" {
+  type        = number
+  description = "The number of nodes to create in this cluster's default node pool."
+  default     = 0
+}
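The nested object type for `master_authorized_networks_config` above is easy to get wrong; a minimal value matching that type (CIDR and display name illustrative):

```
master_authorized_networks_config = [
  {
    cidr_blocks = [
      {
        cidr_block   = "10.0.0.0/8" # range allowed to reach the master
        display_name = "internal"
      },
    ]
  },
]
```

Per the variable description, omitting the nested `cidr_blocks` attribute disallows external access apart from the cluster node IPs, which GKE whitelists automatically.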
+variable "remove_default_node_pool" {
+  type        = bool
+  description = "Remove default node pool while setting up the cluster"
+  default     = false
+}
+
+variable "disable_legacy_metadata_endpoints" {
+  type        = bool
+  description = "Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated."
+  default     = true
+}
+
+variable "node_pools" {
+  type        = list(map(string))
+  description = "List of maps containing node pools"
+
+  default = [
+    {
+      name = "default-node-pool"
+    },
+  ]
+}
+
+variable "node_pools_labels" {
+  type        = map(map(string))
+  description = "Map of maps containing node labels by node-pool name"
+
+  default = {
+    all               = {}
+    default-node-pool = {}
+  }
+}
+
+variable "node_pools_metadata" {
+  type        = map(map(string))
+  description = "Map of maps containing node metadata by node-pool name"
+
+  default = {
+    all               = {}
+    default-node-pool = {}
+  }
+}
+
+variable "node_pools_tags" {
+  type        = map(list(string))
+  description = "Map of lists containing node network tags by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "node_pools_oauth_scopes" {
+  type        = map(list(string))
+  description = "Map of lists containing node oauth scopes by node-pool name"
+
+  default = {
+    all               = ["https://www.googleapis.com/auth/cloud-platform"]
+    default-node-pool = []
+  }
+}
+
+variable "stub_domains" {
+  type        = map(list(string))
+  description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server"
+  default     = {}
+}
+
+variable "upstream_nameservers" {
+  type        = list(string)
+  description = "If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf"
+  default     = []
+}
+
+variable "non_masquerade_cidrs" {
+  type        = list(string)
+  description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading."
+  default     = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
+}
+
+variable "ip_masq_resync_interval" {
+  type        = string
+  description = "The interval at which the agent attempts to sync its ConfigMap file from the disk."
+  default     = "60s"
+}
+
+variable "ip_masq_link_local" {
+  type        = bool
+  description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)."
+  default     = false
+}
+
+variable "configure_ip_masq" {
+  description = "Enables the installation of ip masquerading, which is usually no longer required when using aliased IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server."
+  default     = false
+}
+
+variable "logging_service" {
+  type        = string
+  description = "The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none"
+  default     = "logging.googleapis.com"
+}
+
+variable "monitoring_service" {
+  type        = string
+  description = "The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none"
+  default     = "monitoring.googleapis.com"
+}
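The per-pool maps above are keyed by node pool name, with the `all` key applying to every pool; a short sketch (label keys and values illustrative):

```
node_pools_labels = {
  all               = { environment = "test" }
  default-node-pool = { workload = "general" }
}

node_pools_tags = {
  all               = ["gke-node"]
  default-node-pool = []
}
```

An optional `default_values` entry, consulted by the lookups in cluster.tf, can additionally suppress the automatic `cluster_name`/`node_pool` labels and `gke-*` tags.

+variable "create_service_account" {
+  type        = bool
+  description = "Defines if service account specified to run nodes should be created."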
+ default = true +} + +variable "grant_registry_access" { + type = bool + description = "Grants created cluster-specific service account storage.objectViewer role." + default = false +} + +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + +variable "service_account" { + type = string + description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." + default = "" +} + +variable "basic_auth_username" { + type = string + description = "The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration." + default = "" +} + +variable "basic_auth_password" { + type = string + description = "The password to be used with Basic Authentication." + default = "" +} + +variable "issue_client_certificate" { + type = bool + description = "Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive!" + default = false +} + +variable "cluster_ipv4_cidr" { + default = "" + description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." +} + +variable "cluster_resource_labels" { + type = map(string) + description = "The GCE resource labels (a map of key/value pairs) to be applied to the cluster" + default = {} +} + +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." + default = false +} + +variable "deploy_using_private_endpoint" { + type = bool + description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." + default = false +} + +variable "enable_private_endpoint" { + type = bool + description = "(Beta) Whether the master's internal IP address is used as the cluster endpoint" + default = false +} + +variable "enable_private_nodes" { + type = bool + description = "(Beta) Whether nodes have internal IP addresses only" + default = false +} + +variable "master_ipv4_cidr_block" { + type = string + description = "(Beta) The IP range in CIDR notation to use for the hosted master network" + default = "10.0.0.0/28" +} diff --git a/modules/private-cluster-update-variant/versions.tf b/modules/private-cluster-update-variant/versions.tf new file mode 100644 index 0000000000..832ec1df39 --- /dev/null +++ b/modules/private-cluster-update-variant/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +} diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index c29d58ee93..5465544b82 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -174,10 +174,12 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | | project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (optional if zonal cluster / required if regional) | string | `"null"` | no | | regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| registry\_project\_id | Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project. | string | `""` | no | | remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | | service\_account | The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created. | string | `""` | no | +| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | bool | `"false"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | | upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | @@ -237,6 +239,9 @@ following project roles: - roles/iam.serviceAccountUser - roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + ### Enable APIs In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: @@ -257,141 +262,6 @@ The project has the following folders and files: - /README.MD: This file. - /modules: Private and beta sub modules. -## Templating - -To more cleanly handle cases where desired functionality would require complex duplication of Terraform resources (i.e. [PR 51](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/51)), this repository is largely generated from the [`autogen`](/autogen) directory. - -The root module is generated by running `make generate`. 
Changes to this repository should be made in the [`autogen`](/autogen) directory where appropriate. - -Note: The correct sequence to update the repo using autogen functionality is to run -`make generate && make generate_docs`. This will create the various Terraform files, and then -generate the Terraform documentation using `terraform-docs`. - -## Testing - -### Requirements -- [bundler](https://github.com/bundler/bundler) -- [gcloud](https://cloud.google.com/sdk/install) -- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.6.0 - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Integration test - -Integration tests are run though [test-kitchen](https://github.com/test-kitchen/test-kitchen), [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform), and [InSpec](https://github.com/inspec/inspec). - -Six test-kitchen instances are defined: - -- `deploy-service` -- `node-pool` -- `shared-vpc` -- `simple-regional` -- `simple-zonal` -- `stub-domains` - -The test-kitchen instances in `test/fixtures/` wrap identically-named examples in the `examples/` directory. - -#### Setup - -1. Configure the [test fixtures](#test-configuration) -2. Download a Service Account key with the necessary permissions and put it in the module's root directory with the name `credentials.json`. - - Requires the [permissions to run the module](#configure-a-service-account) - - Requires `roles/compute.networkAdmin` to create the test suite's networks - - Requires `roles/resourcemanager.projectIamAdmin` since service account creation is tested -3. Build the Docker container for testing: - - ``` - make docker_build_kitchen_terraform - ``` -4. Run the testing container in interactive mode: - - ``` - make docker_run - ``` - - The module root directory will be loaded into the Docker container at `/cft/workdir/`. -5. Run kitchen-terraform to test the infrastructure: - - 1. `kitchen create` creates Terraform state and downloads modules, if applicable. - 2. `kitchen converge` creates the underlying resources. Run `kitchen converge ` to create resources for a specific test case. - 3. Run `kitchen converge` again. This is necessary due to an oddity in how `networkPolicyConfig` is handled by the upstream API. (See [#72](https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/72) for details). - 4. `kitchen verify` tests the created infrastructure. Run `kitchen verify ` to run a specific test case. - 5. `kitchen destroy` tears down the underlying resources created by `kitchen converge`. Run `kitchen destroy ` to tear down resources for a specific test case. - -Alternatively, you can simply run `make test_integration_docker` to run all the test steps non-interactively. - -If you wish to parallelize running the test suites, it is also possible to offload the work onto Concourse to run each test suite for you using the command `make test_integration_concourse`. The `.concourse` directory will be created and contain all of the logs from the running test suites. - -When running tests locally, you will need to use your own test project environment. 
You can configure your environment by setting all of the following variables: - -``` -export COMPUTE_ENGINE_SERVICE_ACCOUNT="" -export PROJECT_ID="" -export REGION="" -export ZONES='[""]' -export SERVICE_ACCOUNT_JSON="$(cat "")" -export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" -export GOOGLE_APPLICATION_CREDENTIALS="" -``` - -#### Test configuration - -Each test-kitchen instance is configured with a `variables.tfvars` file in the test fixture directory, e.g. `test/fixtures/node_pool/terraform.tfvars`. -For convenience, since all of the variables are project-specific, these files have been symlinked to `test/fixtures/shared/terraform.tfvars`. -Similarly, each test fixture has a `variables.tf` to define these variables, and an `outputs.tf` to facilitate providing necessary information for `inspec` to locate and query against created resources. - -Each test-kitchen instance creates a GCP Network and Subnetwork fixture to house resources, and may create any other necessary fixture data as needed. - -### Autogeneration of documentation from .tf files -Run -``` -make generate_docs -``` - -### Linting -The makefile in this project will lint or sometimes just format any shell, -Python, golang, Terraform, or Dockerfiles. The linters will only be run if -the makefile finds files with the appropriate file extension. - -All of the linter checks are in the default make target, so you just have to -run - -``` -make -s -``` - -The -s is for 'silent'. Successful output looks like this - -``` -Running shellcheck -Running flake8 -Running go fmt and go vet -Running terraform validate -Running hadolint on Dockerfiles -Checking for required files -Testing the validity of the header check -.. ----------------------------------------------------------------------- -Ran 2 tests in 0.026s - -OK -Checking file headers -The following lines have trailing whitespace -``` - -The linters -are as follows: -* Shell - shellcheck. Can be found in homebrew -* Python - flake8. Can be installed with 'pip install flake8' -* Golang - gofmt. gofmt comes with the standard golang installation. golang -is a compiled language so there is no standard linter. -* Terraform - terraform has a built-in linter in the 'terraform validate' -command. -* Dockerfiles - hadolint. Can be found in homebrew [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index a5bb5da979..3c42e64325 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -41,6 +41,7 @@ resource "google_container_cluster" "primary" { } } + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link min_master_version = local.master_version @@ -99,7 +100,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool] + ignore_changes = [node_pool, initial_node_count] } timeouts { @@ -166,22 +167,14 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_labels["all"], var.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - { - "cluster_name" = var.name - }, - { - "node_pool" = var.node_pools[count.index]["name"] - }, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, var.node_pools_metadata["all"], var.node_pools_metadata[var.node_pools[count.index]["name"]], { @@ -189,8 +182,8 @@ resource "google_container_node_pool" "pools" { }, ) tags = concat( - ["gke-${var.name}"], - ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], var.node_pools_tags["all"], var.node_pools_tags[var.node_pools[count.index]["name"]], ) @@ -232,6 +225,7 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_cluster" { + count = var.skip_provisioners ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index b240a23e65..8a581ff68e 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -20,7 +20,7 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 + count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index bfe746401c..aba5e2d79f 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -23,7 +23,7 @@ data "google_compute_zones" "available" { provider = google project = var.project_id - region = var.region + region = local.region } resource "random_shuffle" "available_zones" { @@ -34,6 +34,7 @@ resource "random_shuffle" "available_zones" { locals { // location location = var.regional ? var.region : var.zones[0] + region = var.region == null ? join("-", slice(split("-", var.zones[0]), 0, 2)) : var.region // for regional cluster - use var.zones if provided, use available otherwise, for zonal cluster use var.zones with first element extracted node_locations = var.regional ? coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result)) : slice(var.zones, 1, length(var.zones)) // kuberentes version @@ -44,6 +45,7 @@ locals { master_version = var.regional ? local.master_version_regional : local.master_version_zonal node_version = var.regional ? local.node_version_regional : local.node_version_zonal + custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? 
var.network_project_id : var.project_id diff --git a/modules/private-cluster/networks.tf b/modules/private-cluster/networks.tf index a382073dc0..aae034eee5 100644 --- a/modules/private-cluster/networks.tf +++ b/modules/private-cluster/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/modules/private-cluster/sa.tf b/modules/private-cluster/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/modules/private-cluster/sa.tf +++ b/modules/private-cluster/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/modules/private-cluster/scripts/wait-for-cluster.sh b/modules/private-cluster/scripts/wait-for-cluster.sh index 6ff3253d58..b7019eace1 100755 --- a/modules/private-cluster/scripts/wait-for-cluster.sh +++ b/modules/private-cluster/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,11 @@ set -e +# shellcheck disable=SC2034 +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 8008e08975..508a4f1b96 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -257,6 +258,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." @@ -292,6 +299,11 @@ variable "cluster_resource_labels" { default = {} } +variable "skip_provisioners" { + type = bool + description = "Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality." 
+ default = false +} variable "deploy_using_private_endpoint" { type = bool diff --git a/networks.tf b/networks.tf index a382073dc0..aae034eee5 100644 --- a/networks.tf +++ b/networks.tf @@ -27,6 +27,6 @@ data "google_compute_subnetwork" "gke_subnetwork" { provider = google name = var.subnetwork - region = var.region + region = local.region project = local.network_project_id } diff --git a/sa.tf b/sa.tf index 9e063fcc22..c7f34e4fbb 100644 --- a/sa.tf +++ b/sa.tf @@ -64,7 +64,7 @@ resource "google_project_iam_member" "cluster_service_account-monitoring_viewer" resource "google_project_iam_member" "cluster_service_account-gcr" { count = var.create_service_account && var.grant_registry_access ? 1 : 0 - project = var.project_id + project = var.registry_project_id == "" ? var.project_id : var.registry_project_id role = "roles/storage.objectViewer" member = "serviceAccount:${google_service_account.cluster_service_account[0].email}" } diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 6ff3253d58..b7019eace1 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,11 @@ set -e +# shellcheck disable=SC2034 +if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${GOOGLE_APPLICATION_CREDENTIALS}" +fi + PROJECT=$1 CLUSTER_NAME=$2 gcloud_command="gcloud container clusters list --project=$PROJECT --format=json" diff --git a/test/.gitignore b/test/.gitignore new file mode 100644 index 0000000000..d69ba0d42f --- /dev/null +++ b/test/.gitignore @@ -0,0 +1 @@ +source.sh diff --git a/test/boilerplate/boilerplate.Makefile.txt b/test/boilerplate/boilerplate.Makefile.txt deleted file mode 100644 index b0c7da3d77..0000000000 --- a/test/boilerplate/boilerplate.Makefile.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/test/boilerplate/boilerplate.go.txt b/test/boilerplate/boilerplate.go.txt deleted file mode 100644 index 557e16f064..0000000000 --- a/test/boilerplate/boilerplate.go.txt +++ /dev/null @@ -1,15 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
diff --git a/test/boilerplate/boilerplate.py.txt b/test/boilerplate/boilerplate.py.txt
deleted file mode 100644
index b0c7da3d77..0000000000
--- a/test/boilerplate/boilerplate.py.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/test/boilerplate/boilerplate.xml.txt b/test/boilerplate/boilerplate.xml.txt
deleted file mode 100644
index 3d98cdc6e5..0000000000
--- a/test/boilerplate/boilerplate.xml.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-<!--
-Copyright 2018 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-      https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
diff --git a/test/boilerplate/boilerplate.yaml.txt b/test/boilerplate/boilerplate.yaml.txt
deleted file mode 100644
index b0c7da3d77..0000000000
--- a/test/boilerplate/boilerplate.yaml.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/test/ci/simple-regional-with-networking.yml b/test/ci/simple-regional-with-networking.yml
new file mode 100644
index 0000000000..68ba8c38cd
--- /dev/null
+++ b/test/ci/simple-regional-with-networking.yml
@@ -0,0 +1,18 @@
+---
+
+platform: linux
+
+inputs:
+- name: pull-request
+  path: terraform-google-kubernetes-engine
+
+run:
+  path: make
+  args: ['test_integration']
+  dir: terraform-google-kubernetes-engine
+
+params:
+  SUITE: "simple-regional-with-networking-local"
+  COMPUTE_ENGINE_SERVICE_ACCOUNT: ""
+  REGION: "us-east4"
+  ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]'
diff --git a/test/ci/workload-metadata-config.yml b/test/ci/workload-metadata-config.yml
index 23874671db..231c8dfc3a 100644
--- a/test/ci/workload-metadata-config.yml
+++ b/test/ci/workload-metadata-config.yml
@@ -15,4 +15,5 @@ params:
   SUITE: "workload-metadata-config-local"
   COMPUTE_ENGINE_SERVICE_ACCOUNT: ""
   REGION: "us-east4"
-  ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]'
\ No newline at end of file
+  ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]'
+
diff --git a/test/ci_integration.sh b/test/ci_integration.sh
deleted file mode 100755
index 365ed3862e..0000000000
--- a/test/ci_integration.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Always clean up. -DELETE_AT_EXIT="$(mktemp -d)" -finish() { - echo 'BEGIN: finish() trap handler' >&2 - kitchen destroy "$SUITE" - [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" - echo 'END: finish() trap handler' >&2 -} - -# Map the input parameters provided by Concourse CI, or whatever mechanism is -# running the tests to Terraform input variables. Also setup credentials for -# use with kitchen-terraform, inspec, and gcloud. -setup_environment() { - local tmpfile - tmpfile="$(mktemp)" - echo "${SERVICE_ACCOUNT_JSON}" > "${tmpfile}" - - echo "${SERVICE_ACCOUNT_JSON}" > "test/fixtures/shared/credentials.json" - - # gcloud variables - export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="${tmpfile}" - # Application default credentials (Terraform google provider and inspec-gcp) - export GOOGLE_APPLICATION_CREDENTIALS="${tmpfile}" - - # Terraform variables - export TF_VAR_project_id="$PROJECT_ID" - export TF_VAR_credentials_path_relative="../shared/credentials.json" - export TF_VAR_region="$REGION" - export TF_VAR_zones="$ZONES" - export TF_VAR_compute_engine_service_account="$COMPUTE_ENGINE_SERVICE_ACCOUNT" -} - -main() { - export SUITE="${SUITE:-}" - - set -eu - # Setup trap handler to auto-cleanup - export TMPDIR="${DELETE_AT_EXIT}" - trap finish EXIT - - # Setup environment variables - setup_environment - set -x - - # Execute the test lifecycle - kitchen create "$SUITE" - kitchen converge "$SUITE" - kitchen verify "$SUITE" -} - -# if script is being executed and not sourced. -if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then - main "$@" -fi diff --git a/test/fixtures/deploy_service/terraform.tfvars b/test/fixtures/deploy_service/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/deploy_service/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/disable_client_cert/terraform.tfvars b/test/fixtures/disable_client_cert/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/disable_client_cert/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/node_pool/terraform.tfvars b/test/fixtures/node_pool/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/node_pool/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/node_pool_update_variant/example.tf b/test/fixtures/node_pool_update_variant/example.tf new file mode 100644 index 0000000000..c3a21df3d5 --- /dev/null +++ b/test/fixtures/node_pool_update_variant/example.tf @@ -0,0 +1,29 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "example" { + source = "../../../examples/node_pool_update_variant" + + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + zones = slice(var.zones, 0, 1) + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + compute_engine_service_account = var.compute_engine_service_account +} diff --git a/test/fixtures/node_pool_update_variant/network.tf b/test/fixtures/node_pool_update_variant/network.tf new file mode 100644 index 0000000000..e1292eae3b --- /dev/null +++ b/test/fixtures/node_pool_update_variant/network.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +provider "google" { + project = var.project_id +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} + diff --git a/test/fixtures/node_pool_update_variant/outputs.tf b/test/fixtures/node_pool_update_variant/outputs.tf new file mode 120000 index 0000000000..726bdc722f --- /dev/null +++ b/test/fixtures/node_pool_update_variant/outputs.tf @@ -0,0 +1 @@ +../shared/outputs.tf \ No newline at end of file diff --git a/test/fixtures/node_pool_update_variant/variables.tf b/test/fixtures/node_pool_update_variant/variables.tf new file mode 120000 index 0000000000..c113c00a3d --- /dev/null +++ b/test/fixtures/node_pool_update_variant/variables.tf @@ -0,0 +1 @@ +../shared/variables.tf \ No newline at end of file diff --git a/test/fixtures/sandbox_enabled/example.tf b/test/fixtures/sandbox_enabled/example.tf new file mode 100644 index 0000000000..05b7edfd9e --- /dev/null +++ b/test/fixtures/sandbox_enabled/example.tf @@ -0,0 +1,40 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "example" { + source = "../../../examples/simple_regional_beta" + + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + compute_engine_service_account = var.compute_engine_service_account + istio = false + cloudrun = false + node_metadata = "UNSPECIFIED" + sandbox_enabled = true + remove_default_node_pool = true + + node_pools = [ + { + name = "default-node-pool" + image_type = "COS_CONTAINERD" + }, + ] +} diff --git a/test/fixtures/sandbox_enabled/network.tf b/test/fixtures/sandbox_enabled/network.tf new file mode 100644 index 0000000000..5d34d43748 --- /dev/null +++ b/test/fixtures/sandbox_enabled/network.tf @@ -0,0 +1,48 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +resource "random_string" "suffix" { + length = 4 + special = false + upper = false +} + +provider "google" { + project = var.project_id +} + +resource "google_compute_network" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "main" { + name = "cft-gke-test-${random_string.suffix.result}" + ip_cidr_range = "10.0.0.0/17" + region = var.region + network = google_compute_network.main.self_link + + secondary_ip_range { + range_name = "cft-gke-test-pods-${random_string.suffix.result}" + ip_cidr_range = "192.168.0.0/18" + } + + secondary_ip_range { + range_name = "cft-gke-test-services-${random_string.suffix.result}" + ip_cidr_range = "192.168.64.0/18" + } +} + diff --git a/test/fixtures/sandbox_enabled/outputs.tf b/test/fixtures/sandbox_enabled/outputs.tf new file mode 120000 index 0000000000..726bdc722f --- /dev/null +++ b/test/fixtures/sandbox_enabled/outputs.tf @@ -0,0 +1 @@ +../shared/outputs.tf \ No newline at end of file diff --git a/test/fixtures/sandbox_enabled/variables.tf b/test/fixtures/sandbox_enabled/variables.tf new file mode 120000 index 0000000000..c113c00a3d --- /dev/null +++ b/test/fixtures/sandbox_enabled/variables.tf @@ -0,0 +1 @@ +../shared/variables.tf \ No newline at end of file diff --git a/test/fixtures/shared/outputs.tf b/test/fixtures/shared/outputs.tf index 1c2eb5e9ba..71b4b250de 100644 --- a/test/fixtures/shared/outputs.tf +++ b/test/fixtures/shared/outputs.tf @@ -79,3 +79,6 @@ output "service_account" { value = module.example.service_account } +output "registry_project_id" { + value = var.registry_project_id +} diff --git a/test/fixtures/shared/terraform.tfvars b/test/fixtures/shared/terraform.tfvars deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/fixtures/shared/terraform.tfvars.sample b/test/fixtures/shared/terraform.tfvars.sample deleted file mode 100644 index 3110e9b3d5..0000000000 --- a/test/fixtures/shared/terraform.tfvars.sample +++ /dev/null @@ -1,4 +0,0 @@ -project_id="" -region="us-east4" -zones=["us-east4-a","us-east4-b","us-east4-c"] -compute_engine_service_account="" diff --git a/test/fixtures/shared/variables.tf b/test/fixtures/shared/variables.tf index f8e3d6dfa4..5dff24dbd4 100644 --- a/test/fixtures/shared/variables.tf +++ b/test/fixtures/shared/variables.tf @@ -20,15 +20,20 @@ variable "project_id" { variable "region" { description = "The GCP region to create and test resources in" + default = "us-east4" } variable "zones" { type = list(string) description = "The GCP zones to create and test resources in, for applicable tests" - default = [] + default = ["us-east4-a", "us-east4-b", "us-east4-c"] } variable "compute_engine_service_account" { description = "The email address of the service account to associate with the GKE cluster" } +variable "registry_project_id" { + description = "Project to use for granting access to the GCR registry, if requested" +} + diff --git a/test/fixtures/shared_vpc/terraform.tfvars b/test/fixtures/shared_vpc/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- 
a/test/fixtures/shared_vpc/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional/example.tf b/test/fixtures/simple_regional/example.tf index a03fadb28b..7f8bb83637 100644 --- a/test/fixtures/simple_regional/example.tf +++ b/test/fixtures/simple_regional/example.tf @@ -25,4 +25,5 @@ module "example" { ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name compute_engine_service_account = var.compute_engine_service_account + skip_provisioners = true } diff --git a/test/fixtures/simple_regional/terraform.tfvars b/test/fixtures/simple_regional/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_regional/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional_private/terraform.tfvars b/test/fixtures/simple_regional_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_regional_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_regional_with_networking/example.tf b/test/fixtures/simple_regional_with_networking/example.tf new file mode 100644 index 0000000000..c7ae5af76c --- /dev/null +++ b/test/fixtures/simple_regional_with_networking/example.tf @@ -0,0 +1,22 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "example" { + source = "../../../examples/simple_regional_with_networking" + + project_id = var.project_id + region = var.region +} diff --git a/test/fixtures/simple_regional_with_networking/outputs.tf b/test/fixtures/simple_regional_with_networking/outputs.tf new file mode 100644 index 0000000000..08f9a8a2e8 --- /dev/null +++ b/test/fixtures/simple_regional_with_networking/outputs.tf @@ -0,0 +1,73 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +output "project_id" { + value = var.project_id +} + +output "location" { + value = module.example.location +} + +output "cluster_name" { + description = "Cluster name" + value = module.example.cluster_name +} + +output "kubernetes_endpoint" { + sensitive = true + value = module.example.kubernetes_endpoint +} + +output "client_token" { + sensitive = true + value = module.example.client_token +} + +output "ca_certificate" { + value = module.example.ca_certificate +} + +output "service_account" { + description = "The default service account used for running nodes." + value = module.example.service_account +} + +output "network_name" { + description = "The name of the VPC being created" + value = module.example.network +} + +output "subnet_name" { + description = "The name of the subnet being created" + value = module.example.subnetwork +} + +output "region" { + description = "The region the cluster is hosted in" + value = module.example.region +} + +output "ip_range_pods_name" { + description = "The secondary range name for pods" + value = module.example.ip_range_pods_name +} + +output "ip_range_services_name" { + description = "The secondary range name for services" + value = module.example.ip_range_services_name +} diff --git a/test/fixtures/simple_regional_with_networking/variables.tf b/test/fixtures/simple_regional_with_networking/variables.tf new file mode 100644 index 0000000000..e9310a56c5 --- /dev/null +++ b/test/fixtures/simple_regional_with_networking/variables.tf @@ -0,0 +1,24 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "region" { + description = "The region to host the cluster in" + default = "us-east4" +} diff --git a/test/fixtures/simple_zonal/terraform.tfvars b/test/fixtures/simple_zonal/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_zonal/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/simple_zonal_private/terraform.tfvars b/test/fixtures/simple_zonal_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/simple_zonal_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains/terraform.tfvars b/test/fixtures/stub_domains/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains_private/terraform.tfvars b/test/fixtures/stub_domains_private/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains_private/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars b/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/stub_domains_upstream_nameservers/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/upstream_nameservers/terraform.tfvars b/test/fixtures/upstream_nameservers/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- a/test/fixtures/upstream_nameservers/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/fixtures/workload_metadata_config/example.tf b/test/fixtures/workload_metadata_config/example.tf index 4d4a98e119..3568cfa404 100644 --- a/test/fixtures/workload_metadata_config/example.tf +++ b/test/fixtures/workload_metadata_config/example.tf @@ -17,13 +17,13 @@ module "example" { source = "../../../examples/workload_metadata_config" - project_id = var.project_id - cluster_name_suffix = "-${random_string.suffix.result}" - region = var.region - zones = slice(var.zones, 0, 1) - network = google_compute_network.main.name - subnetwork = google_compute_subnetwork.main.name - ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name - ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name - compute_engine_service_account = var.compute_engine_service_account + project_id = var.project_id + cluster_name_suffix = "-${random_string.suffix.result}" + region = var.region + zones = slice(var.zones, 0, 1) + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + ip_range_pods = google_compute_subnetwork.main.secondary_ip_range[0].range_name + ip_range_services = google_compute_subnetwork.main.secondary_ip_range[1].range_name + registry_project_id = var.registry_project_id } diff --git a/test/fixtures/workload_metadata_config/terraform.tfvars b/test/fixtures/workload_metadata_config/terraform.tfvars deleted file mode 120000 index 08ac6f4724..0000000000 --- 
a/test/fixtures/workload_metadata_config/terraform.tfvars +++ /dev/null @@ -1 +0,0 @@ -../shared/terraform.tfvars \ No newline at end of file diff --git a/test/integration/deploy_service/controls/gcloud.rb b/test/integration/deploy_service/controls/gcloud.rb index 2f8cfb2a38..fd72b9180b 100644 --- a/test/integration/deploy_service/controls/gcloud.rb +++ b/test/integration/deploy_service/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/deploy_service/controls/kubectl.rb b/test/integration/deploy_service/controls/kubectl.rb index 1443f94057..2d4a473d2c 100644 --- a/test/integration/deploy_service/controls/kubectl.rb +++ b/test/integration/deploy_service/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/disable_client_cert/controls/gcloud.rb b/test/integration/disable_client_cert/controls/gcloud.rb index c4739ffdaa..91d0c9df87 100644 --- a/test/integration/disable_client_cert/controls/gcloud.rb +++ b/test/integration/disable_client_cert/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb index a9696c211a..6ff5fdd201 100644 --- a/test/integration/node_pool/controls/gcloud.rb +++ b/test/integration/node_pool/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/node_pool/controls/kubectl.rb b/test/integration/node_pool/controls/kubectl.rb index fb11abad17..471f9cb33f 100644 --- a/test/integration/node_pool/controls/kubectl.rb +++ b/test/integration/node_pool/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/sandbox_enabled/controls/gcloud.rb b/test/integration/sandbox_enabled/controls/gcloud.rb new file mode 100644 index 0000000000..eb0ffdaf46 --- /dev/null +++ b/test/integration/sandbox_enabled/controls/gcloud.rb @@ -0,0 +1,102 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
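+ +# This control parses the JSON from `gcloud container clusters describe` and +# asserts that the sandbox_enabled fixture produced a running, public, +# regional cluster whose non-default node pool uses the COS_CONTAINERD image +# and carries the sandbox.gke.io/runtime=gvisor node label that marks +# GKE Sandbox (gVisor) pools.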
+ +project_id = attribute('project_id') +location = attribute('location') +cluster_name = attribute('cluster_name') + +control "gcloud" do + title "Google Compute Engine GKE configuration" + describe command("gcloud --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + + describe "cluster" do + it "is running" do + expect(data['status']).to eq 'RUNNING' + end + + it "is regional" do + expect(data['location']).to match(/^.*[1-9]$/) + end + + it "uses public nodes and master endpoint" do + expect(data['privateClusterConfig']).to eq nil + end + + it "has the expected addon settings" do + expect(data['addonsConfig']).to eq({ + "horizontalPodAutoscaling" => {}, + "httpLoadBalancing" => {}, + "kubernetesDashboard" => { + "disabled" => true, + }, + "networkPolicyConfig" => { + "disabled" => true, + }, + }) + end + end + + describe "node pool" do + let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } + + it "is the expected image type" do + expect(node_pools).to include( + including( + "config" => including( + "imageType" => "COS_CONTAINERD", + ), + ) + ) + end + + it "has the expected labels" do + expect(node_pools).to include( + including( + "config" => including( + "labels" => including( + "cluster_name" => cluster_name, + "node_pool" => "default-node-pool", + "sandbox.gke.io/runtime" => "gvisor", + ), + ), + ) + ) + end + + it "has the expected network tags" do + expect(node_pools).to include( + including( + "config" => including( + "tags" => match_array([ + "gke-#{cluster_name}", + "gke-#{cluster_name}-default-node-pool", + ]), + ), + ) + ) + end + + end + end +end diff --git a/test/integration/sandbox_enabled/inspec.yml b/test/integration/sandbox_enabled/inspec.yml new file mode 100644 index 0000000000..0454937a36 --- /dev/null +++ b/test/integration/sandbox_enabled/inspec.yml @@ -0,0 +1,17 @@ +name: sandbox_enabled +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: client_token + required: true + type: string diff --git a/test/integration/shared_vpc/controls/gcloud.rb b/test/integration/shared_vpc/controls/gcloud.rb index 2f8cfb2a38..fd72b9180b 100644 --- a/test/integration/shared_vpc/controls/gcloud.rb +++ b/test/integration/shared_vpc/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional/controls/gcloud.rb b/test/integration/simple_regional/controls/gcloud.rb index e3fba671b3..e6bbcfc047 100644 --- a/test/integration/simple_regional/controls/gcloud.rb +++ b/test/integration/simple_regional/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional_private/controls/gcloud.rb b/test/integration/simple_regional_private/controls/gcloud.rb index f4df827813..b15dafcd02 100644 --- a/test/integration/simple_regional_private/controls/gcloud.rb +++ b/test/integration/simple_regional_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_regional_with_networking/controls/gcloud.rb b/test/integration/simple_regional_with_networking/controls/gcloud.rb new file mode 100644 index 0000000000..e6bbcfc047 --- /dev/null +++ b/test/integration/simple_regional_with_networking/controls/gcloud.rb @@ -0,0 +1,172 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
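+ +# This control mirrors the simple_regional gcloud checks: it describes the +# cluster via gcloud and verifies that it is running, regional, and public, +# that the default node pool is retained, and that the managed pool has the +# expected autoscaling bounds, machine type, disk size, labels, network tags, +# autorepair, and autoupgrade settings.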
+ +project_id = attribute('project_id') +location = attribute('location') +cluster_name = attribute('cluster_name') + +control "gcloud" do + title "Google Compute Engine GKE configuration" + describe command("gcloud --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + + describe "cluster" do + it "is running" do + expect(data['status']).to eq 'RUNNING' + end + + it "is regional" do + expect(data['location']).to match(/^.*[1-9]$/) + end + + it "uses public nodes and master endpoint" do + expect(data['privateClusterConfig']).to eq nil + end + + it "has the expected addon settings" do + expect(data['addonsConfig']).to eq({ + "horizontalPodAutoscaling" => {}, + "httpLoadBalancing" => {}, + "kubernetesDashboard" => { + "disabled" => true, + }, + "networkPolicyConfig" => { + "disabled" => true, + }, + }) + end + end + + describe "default node pool" do + let(:default_node_pool) { data['nodePools'].select { |p| p['name'] == "default-pool" }.first } + + it "exists" do + expect(data['nodePools']).to include( + including( + "name" => "default-pool", + ) + ) + end + end + + describe "node pool" do + let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } + + it "has autoscaling enabled" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "enabled" => true, + ), + ) + ) + end + + it "has the expected minimum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "minNodeCount" => 1, + ), + ) + ) + end + + it "has the expected maximum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "maxNodeCount" => 100, + ), + ) + ) + end + + it "is the expected machine type" do + expect(node_pools).to include( + including( + "config" => including( + "machineType" => "n1-standard-2", + ), + ) + ) + end + + it "has the expected disk size" do + expect(node_pools).to include( + including( + "config" => including( + "diskSizeGb" => 100, + ), + ) + ) + end + + it "has the expected labels" do + expect(node_pools).to include( + including( + "config" => including( + "labels" => including( + "cluster_name" => cluster_name, + "node_pool" => "default-node-pool", + ), + ), + ) + ) + end + + it "has the expected network tags" do + expect(node_pools).to include( + including( + "config" => including( + "tags" => match_array([ + "gke-#{cluster_name}", + "gke-#{cluster_name}-default-node-pool", + ]), + ), + ) + ) + end + + it "has autorepair enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoRepair" => true, + ), + ) + ) + end + + it "has autoupgrade enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoUpgrade" => true, + ), + ) + ) + end + end + end +end diff --git a/test/boilerplate/boilerplate.Dockerfile.txt b/test/integration/simple_regional_with_networking/controls/network.rb similarity index 54% rename from test/boilerplate/boilerplate.Dockerfile.txt rename to test/integration/simple_regional_with_networking/controls/network.rb index b0c7da3d77..a17ce74663 100644 --- a/test/boilerplate/boilerplate.Dockerfile.txt +++ b/test/integration/simple_regional_with_networking/controls/network.rb @@ -1,4 +1,4 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache 
License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,3 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +project_id = attribute('project_id') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') +control "network" do + title "gcp network configuration" + describe google_compute_network( + project: project_id, + name: network_name + ) do + it { should exist } + its('subnetworks.count') { should eq 1 } + its('subnetworks.first') { should match subnet_name } + end + end diff --git a/test/integration/simple_regional_with_networking/controls/subnet.rb b/test/integration/simple_regional_with_networking/controls/subnet.rb new file mode 100644 index 0000000000..f88d46355b --- /dev/null +++ b/test/integration/simple_regional_with_networking/controls/subnet.rb @@ -0,0 +1,46 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project_id = attribute('project_id') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') +region = attribute('region') +ip_range_pods_name = attribute('ip_range_pods_name') +ip_range_services_name = attribute('ip_range_services_name') +control "subnet" do + title "gcp subnetwork configuration" + describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + let(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + it "should have the correct secondaryIpRanges configuration for #{ip_range_pods_name}" do + expect(data["secondaryIpRanges"][0]).to include( + "rangeName" => ip_range_pods_name, + "ipCidrRange" => "192.168.0.0/18" + ) + end + it "should have the correct secondaryIpRanges configuration for #{ip_range_services_name}" do + expect(data["secondaryIpRanges"][1]).to include( + "rangeName" => ip_range_services_name, + "ipCidrRange" => "192.168.64.0/18" + ) + end + end + end diff --git a/test/integration/simple_regional_with_networking/inspec.yml b/test/integration/simple_regional_with_networking/inspec.yml new file mode 100644 index 0000000000..bf2e4e86aa --- /dev/null +++ b/test/integration/simple_regional_with_networking/inspec.yml @@ -0,0 +1,36 @@ +name: simple_regional_with_networking +depends: + - name: inspec-gcp + git: https://github.com/inspec/inspec-gcp.git + tag: v0.10.0 +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: client_token + required: true + type: string + - name: network_name + required: true + type: string + - name: subnet_name + required: true + type: string + - name: region + required: true
type: string + - name: ip_range_pods_name + required: true + type: string + - name: ip_range_services_name + required: true + type: string diff --git a/test/integration/simple_zonal/controls/gcloud.rb b/test/integration/simple_zonal/controls/gcloud.rb index cab5f8e4fd..c2e72936b0 100644 --- a/test/integration/simple_zonal/controls/gcloud.rb +++ b/test/integration/simple_zonal/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/controls/gcp.rb b/test/integration/simple_zonal/controls/gcp.rb index 8e4cf6f96c..6e9ade64ff 100644 --- a/test/integration/simple_zonal/controls/gcp.rb +++ b/test/integration/simple_zonal/controls/gcp.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/simple_zonal/inspec.yml b/test/integration/simple_zonal/inspec.yml index 028e773638..5cb8ff9e01 100644 --- a/test/integration/simple_zonal/inspec.yml +++ b/test/integration/simple_zonal/inspec.yml @@ -27,4 +27,5 @@ attributes: type: string - name: service_account required: true - type: string \ No newline at end of file + type: string + diff --git a/test/integration/simple_zonal_private/controls/gcloud.rb b/test/integration/simple_zonal_private/controls/gcloud.rb index 2f808e136c..9968affcb6 100644 --- a/test/integration/simple_zonal_private/controls/gcloud.rb +++ b/test/integration/simple_zonal_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains/controls/gcloud.rb b/test/integration/stub_domains/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/stub_domains/controls/gcloud.rb +++ b/test/integration/stub_domains/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains/controls/kubectl.rb b/test/integration/stub_domains/controls/kubectl.rb index 1fa048e98d..1e53883a2d 100644 --- a/test/integration/stub_domains/controls/kubectl.rb +++ b/test/integration/stub_domains/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_private/controls/gcloud.rb b/test/integration/stub_domains_private/controls/gcloud.rb index 3356196754..f16ee7b401 100644 --- a/test/integration/stub_domains_private/controls/gcloud.rb +++ b/test/integration/stub_domains_private/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_private/controls/kubectl.rb b/test/integration/stub_domains_private/controls/kubectl.rb index e9a1bd7412..17502685d8 100644 --- a/test/integration/stub_domains_private/controls/kubectl.rb +++ b/test/integration/stub_domains_private/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb b/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb +++ b/test/integration/stub_domains_upstream_nameservers/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb b/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb index 5223cbd2d4..8e8dfe086c 100644 --- a/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb +++ b/test/integration/stub_domains_upstream_nameservers/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/upstream_nameservers/controls/gcloud.rb b/test/integration/upstream_nameservers/controls/gcloud.rb index 03612e151e..48072bb119 100644 --- a/test/integration/upstream_nameservers/controls/gcloud.rb +++ b/test/integration/upstream_nameservers/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/upstream_nameservers/controls/kubectl.rb b/test/integration/upstream_nameservers/controls/kubectl.rb index 36612a02aa..21ec09c326 100644 --- a/test/integration/upstream_nameservers/controls/kubectl.rb +++ b/test/integration/upstream_nameservers/controls/kubectl.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test/integration/workload_metadata_config/controls/gcloud.rb b/test/integration/workload_metadata_config/controls/gcloud.rb index ea9c3627ce..ad642ff7c9 100644 --- a/test/integration/workload_metadata_config/controls/gcloud.rb +++ b/test/integration/workload_metadata_config/controls/gcloud.rb @@ -1,10 +1,10 @@ -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,8 +13,10 @@ # limitations under the License. 
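+# The registry_project_id and service_account attributes introduced below feed +# a new assertion that the node service account holds +# roles/storage.objectViewer on the registry project, read back via +# `gcloud projects get-iam-policy`.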
project_id = attribute('project_id') +registry_project_id = attribute('registry_project_id') location = attribute('location') cluster_name = attribute('cluster_name') +service_account = attribute('service_account') control "gcloud" do title "Google Compute Engine GKE configuration" @@ -55,4 +57,20 @@ end end end + + describe command("gcloud projects get-iam-policy #{registry_project_id} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:iam) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + it "has expected registry roles" do + expect(iam['bindings']).to include("members" => ["serviceAccount:#{service_account}"], "role" => "roles/storage.objectViewer") + end + end end diff --git a/test/integration/workload_metadata_config/inspec.yml b/test/integration/workload_metadata_config/inspec.yml index f6f3811afa..4f2b7d40d6 100644 --- a/test/integration/workload_metadata_config/inspec.yml +++ b/test/integration/workload_metadata_config/inspec.yml @@ -9,3 +9,9 @@ attributes: - name: project_id required: true type: string + - name: service_account + required: true + type: string + - name: registry_project_id + required: false + type: string diff --git a/test/make.sh b/test/make.sh deleted file mode 100755 index ec1cd6b01d..0000000000 --- a/test/make.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). -# Please make sure to contribute relevant changes upstream! - -# Create a temporary directory that's auto-cleaned, even if the process aborts. -DELETE_AT_EXIT="$(mktemp -d)" -finish() { - [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" -} -trap finish EXIT -# Create a temporary file in the auto-cleaned up directory while avoiding -# overwriting TMPDIR for other processes. -# shellcheck disable=SC2120 -# (Arguments may be passed, e.g. maketemp -d) -maketemp() { - TMPDIR="${DELETE_AT_EXIT}" mktemp "$@" -} - -# find_files is a helper to exclude .git directories and match only regular -# files to avoid double-processing symlinks. -find_files() { - local pth="$1" - shift - find "${pth}" '(' \ - -path '*/.git' -o \ - -path '*/.terraform' -o \ - -path '*/.kitchen' -o \ - -path './autogen' -o \ - -path './test/fixtures/all_examples' -o \ - -path './test/fixtures/shared' ')' \ - -prune -o -type f "$@" -} - -# Compatibility with both GNU and BSD style xargs. -compat_xargs() { - local compat=() rval - # Test if xargs is GNU or BSD style. GNU xargs will succeed with status 0 - # when given --no-run-if-empty and no input on STDIN. BSD xargs will fail and - # exit status non-zero If xargs fails, assume it is BSD style and proceed. - # stderr is silently redirected to avoid console log spam. 
- if xargs --no-run-if-empty </dev/null 2>/dev/null; then - compat=("--no-run-if-empty") - fi - xargs "${compat[@]}" "$@" - rval="$?" - if [[ -z "${NOWARN:-}" ]] && [[ "${rval}" -gt 0 ]]; then - echo "Warning: compat_xargs $* failed with exit code ${rval}" >&2 - fi - return "${rval}" -} - -# This function makes sure that the required files for -# releasing to OSS are present -function basefiles() { - local fn required_files="LICENSE README.md" - echo "Checking for required files ${required_files}" - for fn in ${required_files}; do - test -f "${fn}" || echo "Missing required file ${fn}" - done -} - -# This function runs 'terraform validate' and 'terraform fmt' -# against all directory paths which contain *.tf files. -function check_terraform() { - local rval=125 - # fmt is before validate for faster feedback, validate requires terraform - # init which takes time. - echo "Running terraform fmt" - find_files . -name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | compat_xargs -t -n1 terraform fmt -diff -check=true -write=false - rval="$?" - if [[ "${rval}" -gt 0 ]]; then - echo "Error: terraform fmt failed with exit code ${rval}" >&2 - echo "Check the output for diffs and correct using terraform fmt " >&2 - return "${rval}" - fi - echo "Running terraform validate" - find_files . -not -path "./test/fixtures/shared/*" -name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | compat_xargs -t -n1 helpers/terraform_validate -} - -# This function runs 'go fmt' and 'go vet' on every file -# that ends in '.go' -function golang() { - echo "Running go fmt and go vet" - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go fmt - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go vet -} - -# This function runs the flake8 linter on every file -# ending in '.py' -function check_python() { - echo "Running flake8" - find_files . -name "*.py" -print0 | compat_xargs -0 flake8 - return 0 -} - -# This function runs the shellcheck linter on every -# file ending in '.sh' -function check_shell() { - echo "Running shellcheck" - find_files . -name "*.sh" -print0 | compat_xargs -0 shellcheck -x -} - -# This function makes sure that there is no trailing whitespace -# in any files in the project. -# There are some exclusions -function check_trailing_whitespace() { - local rc - echo "Checking for trailing whitespace" - find_files . -print \ - | grep -v -E '\.(pyc|png)$' \ - | NOWARN=1 compat_xargs grep -H -n '[[:blank:]]$' - rc=$? - if [[ ${rc} -eq 0 ]]; then - return 1 - fi -} - -function generate() { - pip3 install --user -r ./helpers/generate_modules/requirements.txt - ./helpers/generate_modules/generate_modules.py -} - -function generate_docs() { - echo "Generating markdown docs with terraform-docs" - local pth helper_dir rval - helper_dir="$(pwd)/helpers" - while read -r pth; do - if [[ -e "${pth}/README.md" ]]; then - (cd "${pth}" || return 3; "${helper_dir}"/terraform_docs .;) - rval="$?" - if [[ "${rval}" -gt 0 ]]; then - echo "Error: terraform_docs in ${pth} exit code: ${rval}" >&2 - return "${rval}" - fi - else - echo "Skipping ${pth} because README.md does not exist." - fi - done < <(find_files . -name '*.tf' -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u) -} - -function check_generate() { - TMPDIR=$(mktemp -d) - git worktree add --detach "$TMPDIR" >/dev/null - cd "$TMPDIR" || exit 1 - - generate >/dev/null - generate_docs >/dev/null - - git diff --stat --exit-code >/dev/null - rc=$?
- cd - >/dev/null || exit 1 - - if [[ $rc -ne 0 ]]; then - echo '"make generate" creates a diff, run "make generate" and commit the results' - fi - rm -rf "$TMPDIR" - git worktree prune >/dev/null - - echo "Code was generated properly" - - exit $rc -} - -function check_generate_docs() { - TMPDIR=$(mktemp -d) - git worktree add --detach "$TMPDIR" >/dev/null - cd "$TMPDIR" || exit 1 - - generate_docs >/dev/null - git diff --stat --exit-code >/dev/null - rc=$? - cd - >/dev/null || exit 1 - - if [[ $rc -ne 0 ]]; then - echo '"make generate_docs" creates a diff, run "make generate_docs" and commit the results' - fi - rm -rf "$TMPDIR" - git worktree prune >/dev/null - - echo "Docs were generated properly" - - exit $rc -} - -function prepare_test_variables() { - echo "Preparing terraform.tfvars files for integration tests" - #shellcheck disable=2044 - for i in $(find ./test/fixtures -type f -name terraform.tfvars.sample); do - destination=${i/%.sample/} - if [ ! -f "${destination}" ]; then - cp "${i}" "${destination}" - echo "${destination} has been created. Please edit it to reflect your GCP configuration." - fi - done -} - -function check_headers() { - echo "Checking file headers" - # Use the exclusion behavior of find_files - find_files . -type f -print0 \ - | compat_xargs -0 python test/verify_boilerplate.py -} diff --git a/test/setup/.gitignore b/test/setup/.gitignore new file mode 100644 index 0000000000..0e515f83d2 --- /dev/null +++ b/test/setup/.gitignore @@ -0,0 +1,2 @@ +terraform.tfvars +source.sh diff --git a/test/setup/iam.tf b/test/setup/iam.tf new file mode 100644 index 0000000000..29facd32a9 --- /dev/null +++ b/test/setup/iam.tf @@ -0,0 +1,58 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +locals { + int_required_roles = [ + "roles/cloudkms.cryptoKeyEncrypterDecrypter", + "roles/compute.networkAdmin", + "roles/container.clusterAdmin", + "roles/container.developer", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountUser", + "roles/compute.viewer", + "roles/resourcemanager.projectIamAdmin" + ] +} + + +resource "random_id" "random_suffix" { + byte_length = 2 +} + +resource "google_service_account" "int_test" { + project = module.gke-project.project_id + account_id = "gke-int-test-${random_id.random_suffix.hex}" + display_name = "gke-int-test" +} + +resource "google_service_account" "gke_sa" { + project = module.gke-project.project_id + account_id = "gke-sa-int-test-${random_id.random_suffix.hex}" + display_name = "gke-sa-int-test" +} + +resource "google_project_iam_member" "int_test" { + count = length(local.int_required_roles) + + project = module.gke-project.project_id + role = local.int_required_roles[count.index] + member = "serviceAccount:${google_service_account.int_test.email}" +} + +resource "google_service_account_key" "int_test" { + service_account_id = google_service_account.int_test.id +} diff --git a/test/setup/main.tf b/test/setup/main.tf new file mode 100644 index 0000000000..70e10c46a3 --- /dev/null +++ b/test/setup/main.tf @@ -0,0 +1,43 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "gke-project" { + source = "terraform-google-modules/project-factory/google" + version = "~> 3.0" + + name = "ci-gke" + random_project_id = true + org_id = var.org_id + folder_id = var.folder_id + billing_account = var.billing_account + + auto_create_network = true + + activate_apis = [ + "bigquery-json.googleapis.com", + "cloudkms.googleapis.com", + "cloudresourcemanager.googleapis.com", + "compute.googleapis.com", + "container.googleapis.com", + "containerregistry.googleapis.com", + "iam.googleapis.com", + "iamcredentials.googleapis.com", + "oslogin.googleapis.com", + "pubsub.googleapis.com", + "serviceusage.googleapis.com", + "storage-api.googleapis.com", + ] +} diff --git a/test/setup/make_source.sh b/test/setup/make_source.sh new file mode 100755 index 0000000000..ad3f57165a --- /dev/null +++ b/test/setup/make_source.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
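+ +# This script writes ../source.sh from the test/setup Terraform outputs so CI +# jobs and local kitchen runs can source it: it exports TF_VAR_project_id, +# TF_VAR_registry_project_id, and TF_VAR_compute_engine_service_account for +# the fixtures, plus the base64-decoded SERVICE_ACCOUNT_JSON credential.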
+ +echo "#!/usr/bin/env bash" > ../source.sh + +project_id=$(terraform output project_id) +echo "export TF_VAR_project_id='$project_id'" >> ../source.sh + +# We use the same project for registry project in the tests. +echo "export TF_VAR_registry_project_id='$project_id'" >> ../source.sh + +sa_json=$(terraform output sa_key) +# shellcheck disable=SC2086 +echo "export SERVICE_ACCOUNT_JSON='$(echo $sa_json | base64 --decode)'" >> ../source.sh + +compute_engine_service_account=$(terraform output compute_engine_service_account) +echo "export TF_VAR_compute_engine_service_account='$compute_engine_service_account'" >> ../source.sh diff --git a/test/setup/outputs.tf b/test/setup/outputs.tf new file mode 100644 index 0000000000..3e508ed1c7 --- /dev/null +++ b/test/setup/outputs.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "project_id" { + value = module.gke-project.project_id +} + +output "sa_key" { + value = google_service_account_key.int_test.private_key + sensitive = true +} + +output "compute_engine_service_account" { + value = google_service_account.gke_sa.email +} diff --git a/test/setup/variables.tf b/test/setup/variables.tf new file mode 100644 index 0000000000..6d80b89896 --- /dev/null +++ b/test/setup/variables.tf @@ -0,0 +1,26 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +variable "org_id" { + description = "The numeric organization id" +} + +variable "folder_id" { + description = "The folder to deploy in" +} + +variable "billing_account" { + description = "The billing account id associated with the project, e.g. XXXXXX-YYYYYY-ZZZZZZ" +} diff --git a/test/setup/versions.tf b/test/setup/versions.tf new file mode 100644 index 0000000000..51f6a433b0 --- /dev/null +++ b/test/setup/versions.tf @@ -0,0 +1,27 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_version = ">= 0.12" +} + +provider "google" { + version = "~> 2.18.0" +} + +provider "google-beta" { + version = "~> 2.18.0" +} diff --git a/test/task_helper_functions.sh b/test/task_helper_functions.sh new file mode 100755 index 0000000000..7de28bbdfe --- /dev/null +++ b/test/task_helper_functions.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function generate() { + pip3 install --user -r /workspace/helpers/generate_modules/requirements.txt + /workspace/helpers/generate_modules/generate_modules.py +} + +# Changed from using git-diff to avoid errors on CI: +# fatal: not a git repository (or any parent up to mount point /) +function check_generate() { + local tempdir rval rc + setup_trap_handler + tempdir=$(mktemp -d) + rval=0 + echo "Checking submodule file generation" + rsync -axh \ + --exclude '*/.terraform' \ + --exclude '*/.kitchen' \ + --exclude '*/.git' \ + /workspace "${tempdir}" >/dev/null 2>/dev/null + cd "${tempdir}/workspace" || exit 1 + generate >/dev/null 2>/dev/null + generate_docs >/dev/null 2>/dev/null + diff -r \ + --exclude=".terraform" \ + --exclude=".kitchen" \ + --exclude=".git" \ + /workspace "${tempdir}/workspace" + rc=$? + if [[ "${rc}" -ne 0 ]]; then + echo "Error: submodule file generation has not been run; please run the" + echo "'source /workspace/helpers/generate.sh && generate' commands and commit the above changes." + ((rval++)) + fi + cd /workspace || exit 1 + rm -Rf "${tempdir}" + return $((rval)) +} diff --git a/test/test_verify_boilerplate.py b/test/test_verify_boilerplate.py deleted file mode 100755 index 22a3cca055..0000000000 --- a/test/test_verify_boilerplate.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' A simple test for the verify_boilerplate python script. -This will create a set of test files, both valid and invalid, -and confirm that the has_valid_header call returns the correct -value. - -It also checks the number of files that are found by the -get_files call. -''' -from copy import deepcopy -from tempfile import mkdtemp -from shutil import rmtree -import unittest -from verify_boilerplate import has_valid_header, get_refs, get_regexs, \ - get_args, get_files - - -class AllTestCase(unittest.TestCase): - """ - All of the setup, teardown, and tests are contained in this - class.
- """ - - def write_file(self, filename, content, expected): - """ - A utility method that creates test files, and adds them to - the cases that will be tested. - - Args: - filename: (string) the file name (path) to be created. - content: (list of strings) the contents of the file. - expected: (boolean) True if the header is expected to be valid, - false if not. - """ - - file = open(filename, 'w+') - for line in content: - file.write(line + "\n") - file.close() - self.cases[filename] = expected - - def create_test_files(self, tmp_path, extension, header): - """ - Creates 2 test files for .tf, .xml, .go, etc and one for - Dockerfile, and Makefile. - - The reason for the difference is that Makefile and Dockerfile - don't have an extension. These would be substantially more - difficult to create negative test cases, unless the files - were written, deleted, and re-written. - - Args: - tmp_path: (string) the path in which to create the files - extension: (string) the file extension - header: (list of strings) the header/boilerplate content - """ - - content = "\n...blah \ncould be code or could be garbage\n" - special_cases = ["Dockerfile", "Makefile"] - header_template = deepcopy(header) - valid_filename = tmp_path + extension - valid_content = header_template.append(content) - if extension not in special_cases: - # Invalid test cases for non-*file files (.tf|.py|.sh|.yaml|.xml..) - invalid_header = [] - for line in header_template: - if "2018" in line: - invalid_header.append(line.replace('2018', 'YEAR')) - else: - invalid_header.append(line) - invalid_header.append(content) - invalid_content = invalid_header - invalid_filename = tmp_path + "invalid." + extension - self.write_file(invalid_filename, invalid_content, False) - valid_filename = tmp_path + "testfile." + extension - - valid_content = header_template - self.write_file(valid_filename, valid_content, True) - - def setUp(self): - """ - Set initial counts and values, and initializes the setup of the - test files. - """ - self.cases = {} - self.tmp_path = mkdtemp() + "/" - self.my_args = get_args() - self.my_refs = get_refs(self.my_args) - self.my_regex = get_regexs() - self.prexisting_file_count = len( - get_files(self.my_refs.keys(), self.my_args)) - for key in self.my_refs: - self.create_test_files(self.tmp_path, key, - self.my_refs.get(key)) - - def tearDown(self): - """ Delete the test directory. """ - rmtree(self.tmp_path) - - def test_files_headers(self): - """ - Confirms that the expected output of has_valid_header is correct. - """ - for case in self.cases: - if self.cases[case]: - self.assertTrue(has_valid_header(case, self.my_refs, - self.my_regex)) - else: - self.assertFalse(has_valid_header(case, self.my_refs, - self.my_regex)) - - def test_invalid_count(self): - """ - Test that the initial files found isn't zero, indicating - a problem with the code. - """ - self.assertFalse(self.prexisting_file_count == 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/verify_boilerplate.py b/test/verify_boilerplate.py deleted file mode 100644 index a632fdedcc..0000000000 --- a/test/verify_boilerplate.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Verifies that all source files contain the necessary copyright boilerplate -# snippet. -# This is based on existing work -# https://github.com/kubernetes/test-infra/blob/master/hack -# /verify_boilerplate.py -from __future__ import print_function -import argparse -import glob -import os -import re -import sys - - -def get_args(): - """Parses command line arguments. - - Configures and runs argparse.ArgumentParser to extract command line - arguments. - - Returns: - An argparse.Namespace containing the arguments parsed from the - command line - """ - parser = argparse.ArgumentParser() - parser.add_argument("filenames", - help="list of files to check, " - "all files if unspecified", - nargs='*') - rootdir = os.path.dirname(__file__) + "/../" - rootdir = os.path.abspath(rootdir) - parser.add_argument( - "--rootdir", - default=rootdir, - help="root directory to examine") - - default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate") - parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir) - return parser.parse_args() - - -def get_refs(ARGS): - """Converts the directory of boilerplate files into a map keyed by file - extension. - - Reads each boilerplate file's contents into an array, then adds that array - to a map keyed by the file extension. - - Returns: - A map of boilerplate lines, keyed by file extension. For example, - boilerplate.py.txt would result in the k,v pair {".py": py_lines} where - py_lines is an array containing each line of the file. - """ - refs = {} - - # Find and iterate over the absolute path for each boilerplate template - for path in glob.glob(os.path.join( - ARGS.boilerplate_dir, - "boilerplate.*.txt")): - extension = os.path.basename(path).split(".")[1] - ref_file = open(path, 'r') - ref = ref_file.read().splitlines() - ref_file.close() - refs[extension] = ref - return refs - - -# pylint: disable=too-many-locals -def has_valid_header(filename, refs, regexs): - """Test whether a file has the correct boilerplate header. - - Tests each file against the boilerplate stored in refs for that file type - (based on extension), or by the entire filename (eg Dockerfile, Makefile). - Some heuristics are applied to remove build tags and shebangs, but little - variance in header formatting is tolerated. - - Args: - filename: A string containing the name of the file to test - refs: A map of boilerplate headers, keyed by file extension - regexs: a map of compiled regex objects used in verifying boilerplate - - Returns: - True if the file has the correct boilerplate header, otherwise returns - False. 
- """ - try: - with open(filename, 'r') as fp: # pylint: disable=invalid-name - data = fp.read() - except IOError: - return False - basename = os.path.basename(filename) - extension = get_file_extension(filename) - if extension: - ref = refs[extension] - else: - ref = refs[basename] - # remove build tags from the top of Go files - if extension == "go": - con = regexs["go_build_constraints"] - (data, found) = con.subn("", data, 1) - # remove shebang - elif extension == "sh" or extension == "py": - she = regexs["shebang"] - (data, found) = she.subn("", data, 1) - data = data.splitlines() - # if our test file is smaller than the reference it surely fails! - if len(ref) > len(data): - return False - # trim our file to the same number of lines as the reference file - data = data[:len(ref)] - year = regexs["year"] - for datum in data: - if year.search(datum): - return False - - # if we don't match the reference at this point, fail - if ref != data: - return False - return True - - -def get_file_extension(filename): - """Extracts the extension part of a filename. - - Identifies the extension as everything after the last period in filename. - - Args: - filename: string containing the filename - - Returns: - A string containing the extension in lowercase - """ - return os.path.splitext(filename)[1].split(".")[-1].lower() - - -# These directories will be omitted from header checks -SKIPPED_DIRS = [ - 'Godeps', 'third_party', '_gopath', '_output', - '.git', 'vendor', '__init__.py', 'node_modules' -] - - -def normalize_files(files): - """Extracts the files that require boilerplate checking from the files - argument. - - A new list will be built. Each path from the original files argument will - be added unless it is within one of SKIPPED_DIRS. All relative paths will - be converted to absolute paths by prepending the root_dir path parsed from - the command line, or its default value. - - Args: - files: a list of file path strings - - Returns: - A modified copy of the files list where any any path in a skipped - directory is removed, and all paths have been made absolute. - """ - newfiles = [] - for pathname in files: - if any(x in pathname for x in SKIPPED_DIRS): - continue - newfiles.append(pathname) - for idx, pathname in enumerate(newfiles): - if not os.path.isabs(pathname): - newfiles[idx] = os.path.join(ARGS.rootdir, pathname) - return newfiles - - -def get_files(extensions, ARGS): - """Generates a list of paths whose boilerplate should be verified. - - If a list of file names has been provided on the command line, it will be - treated as the initial set to search. Otherwise, all paths within rootdir - will be discovered and used as the initial set. - - Once the initial set of files is identified, it is normalized via - normalize_files() and further stripped of any file name whose extension is - not in extensions. - - Args: - extensions: a list of file extensions indicating which file types - should have their boilerplate verified - - Returns: - A list of absolute file paths - """ - files = [] - if ARGS.filenames: - files = ARGS.filenames - else: - for root, dirs, walkfiles in os.walk(ARGS.rootdir): - # don't visit certain dirs. This is just a performance improvement - # as we would prune these later in normalize_files(). 
But doing it - # cuts down the amount of filesystem walking we do and cuts down - # the size of the file list - for dpath in SKIPPED_DIRS: - if dpath in dirs: - dirs.remove(dpath) - for name in walkfiles: - pathname = os.path.join(root, name) - files.append(pathname) - files = normalize_files(files) - outfiles = [] - for pathname in files: - basename = os.path.basename(pathname) - extension = get_file_extension(pathname) - if extension in extensions or basename in extensions: - outfiles.append(pathname) - return outfiles - - -def get_regexs(): - """Builds a map of regular expressions used in boilerplate validation. - - There are two scenarios where these regexes are used. The first is in - validating the date referenced is the boilerplate, by ensuring it is an - acceptable year. The second is in identifying non-boilerplate elements, - like shebangs and compiler hints that should be ignored when validating - headers. - - Returns: - A map of compiled regular expression objects, keyed by mnemonic. - """ - regexs = {} - # Search for "YEAR" which exists in the boilerplate, but shouldn't in the - # real thing - regexs["year"] = re.compile('YEAR') - # dates can be 2014, 2015, 2016 or 2017, company holder names can be - # anything - regexs["date"] = re.compile('(2014|2015|2016|2017|2018)') - # strip // +build \n\n build constraints - regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", - re.MULTILINE) - # strip #!.* from shell/python scripts - regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) - return regexs - - -def main(args): - """Identifies and verifies files that should have the desired boilerplate. - - Retrieves the lists of files to be validated and tests each one in turn. - If all files contain correct boilerplate, this function terminates - normally. Otherwise it prints the name of each non-conforming file and - exists with a non-zero status code. - """ - regexs = get_regexs() - refs = get_refs(args) - filenames = get_files(refs.keys(), args) - nonconforming_files = [] - for filename in filenames: - if not has_valid_header(filename, refs, regexs): - nonconforming_files.append(filename) - if nonconforming_files: - print('%d files have incorrect boilerplate headers:' % len( - nonconforming_files)) - for filename in sorted(nonconforming_files): - print(os.path.relpath(filename, args.rootdir)) - sys.exit(1) - - -if __name__ == "__main__": - ARGS = get_args() - main(ARGS) diff --git a/variables.tf b/variables.tf index 460bdeaeff..58cf1f4685 100644 --- a/variables.tf +++ b/variables.tf @@ -40,7 +40,8 @@ variable "regional" { variable "region" { type = string - description = "The region to host the cluster in (required)" + description = "The region to host the cluster in (optional if zonal cluster / required if regional)" + default = null } variable "zones" { @@ -257,6 +258,12 @@ variable "grant_registry_access" { default = false } +variable "registry_project_id" { + type = string + description = "Project holding the Google Container Registry. If empty, we use the cluster project. If grant_registry_access is true, storage.objectViewer role is assigned on this project." + default = "" +} + variable "service_account" { type = string description = "The service account to run nodes as if not overridden in `node_pools`. The create_service_account variable default value (true) will cause a cluster-specific service account to be created." 
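
Reviewer note: per its description, the new `registry_project_id` input above is only consulted when `grant_registry_access` is true. A minimal sketch of how a consumer might wire the two together; the module source is the published registry path for this module, while the project, network, and range names are hypothetical placeholders, not values from this diff:

    module "gke" {
      source = "terraform-google-modules/kubernetes-engine/google"

      project_id        = "my-cluster-project"   # hypothetical cluster project
      name              = "example-cluster"
      region            = "us-central1"
      network           = "example-vpc"
      subnetwork        = "example-subnet"
      ip_range_pods     = "pods"
      ip_range_services = "services"

      # Pull images from a registry hosted in a different project;
      # storage.objectViewer is granted on that project instead of the
      # cluster project.
      grant_registry_access = true
      registry_project_id   = "my-registry-project" # hypothetical
    }

Leaving `registry_project_id` at its default empty string keeps the previous behavior of granting access on the cluster project itself.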
@@ -292,3 +299,8 @@ variable "cluster_resource_labels" {
   default     = {}
 }
 
+variable "skip_provisioners" {
+  type        = bool
+  description = "Flag to skip all local-exec provisioners. Note that this breaks the `stub_domains` and `upstream_nameservers` functionality."
+  default     = false
+}
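
Similarly, a sketch of the new `skip_provisioners` flag, useful e.g. when the execution environment lacks the tooling that the module's `local-exec` provisioners shell out to. All values other than the flag itself are hypothetical placeholders:

    module "gke" {
      source = "terraform-google-modules/kubernetes-engine/google"

      project_id        = "my-cluster-project"   # hypothetical
      name              = "example-cluster"
      region            = "us-central1"
      network           = "example-vpc"
      subnetwork        = "example-subnet"
      ip_range_pods     = "pods"
      ip_range_services = "services"

      # Skip every local-exec provisioner; as the variable description
      # warns, this disables the stub_domains and upstream_nameservers
      # functionality.
      skip_provisioners = true
    }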