From ffd1b51bb99a45605729b3c6e0de86ac5aef2a7e Mon Sep 17 00:00:00 2001 From: Sam Gendler Date: Wed, 6 Nov 2019 00:45:16 -0800 Subject: [PATCH 01/20] add private_zonal_with_networking example --- .kitchen.yml | 17 ++ .../private_zonal_with_networking/main.tf | 82 ++++++++ .../private_zonal_with_networking/outputs.tf | 60 ++++++ .../test_outputs.tf | 58 ++++++ .../variables.tf | 54 ++++++ test/ci/private-zonal-with-networking.yml | 18 ++ .../private_zonal_with_networking/example.tf | 23 +++ .../private_zonal_with_networking/outputs.tf | 73 +++++++ .../variables.tf | 31 +++ .../controls/gcloud.rb | 180 ++++++++++++++++++ .../controls/network.rb | 28 +++ .../controls/subnet.rb | 46 +++++ .../private_zonal_with_networking/inspec.yml | 36 ++++ 13 files changed, 706 insertions(+) create mode 100644 examples/private_zonal_with_networking/main.tf create mode 100644 examples/private_zonal_with_networking/outputs.tf create mode 100644 examples/private_zonal_with_networking/test_outputs.tf create mode 100644 examples/private_zonal_with_networking/variables.tf create mode 100644 test/ci/private-zonal-with-networking.yml create mode 100644 test/fixtures/private_zonal_with_networking/example.tf create mode 100644 test/fixtures/private_zonal_with_networking/outputs.tf create mode 100644 test/fixtures/private_zonal_with_networking/variables.tf create mode 100644 test/integration/private_zonal_with_networking/controls/gcloud.rb create mode 100644 test/integration/private_zonal_with_networking/controls/network.rb create mode 100644 test/integration/private_zonal_with_networking/controls/subnet.rb create mode 100644 test/integration/private_zonal_with_networking/inspec.yml diff --git a/.kitchen.yml b/.kitchen.yml index 81603782cd..7707328091 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -68,6 +68,23 @@ suites: systems: - name: simple_regional backend: local + - name: "private_zonal_with_networking" + driver: + root_module_directory: test/fixtures/private_zonal_with_networking + verifier: + systems: + - name: private_zonal_with_networking + backend: local + controls: + - gcloud + - name: subnet + backend: local + controls: + - subnet + - name: network + backend: gcp + controls: + - network - name: "simple_regional_with_networking" driver: root_module_directory: test/fixtures/simple_regional_with_networking diff --git a/examples/private_zonal_with_networking/main.tf b/examples/private_zonal_with_networking/main.tf new file mode 100644 index 0000000000..fb5d34d757 --- /dev/null +++ b/examples/private_zonal_with_networking/main.tf @@ -0,0 +1,82 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module "gcp-network" { + source = "terraform-google-modules/network/google" + version = "~> 1.4.0" + project_id = var.project_id + network_name = var.network + + subnets = [ + { + subnet_name = var.subnetwork + subnet_ip = "10.0.0.0/17" + subnet_region = var.region + subnet_private_access = "true" + }, + ] + + secondary_ranges = { + "${var.subnetwork}" = [ + { + range_name = var.ip_range_pods_name + ip_cidr_range = "192.168.0.0/18" + }, + { + range_name = var.ip_range_services_name + ip_cidr_range = "192.168.64.0/18" + }, + ] + } +} + +data "google_compute_subnetwork" "subnetwork" { + name = module.gcp-network.subnets_names[0] + project = var.project_id + region = var.region + depends_on = [module.gcp-network] +} + +module "gke" { + source = "../../modules/beta-private-cluster/" + project_id = var.project_id + name = var.cluster_name + regional = false + region = var.region + zones = slice(var.zones, 0, 1) + network = data.google_compute_subnetwork.subnetwork.network + subnetwork = data.google_compute_subnetwork.subnetwork.name + ip_range_pods = var.ip_range_pods_name + ip_range_services = var.ip_range_services_name + create_service_account = true + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + + master_authorized_networks_config = [ + { + cidr_blocks = [ + { + cidr_block = data.google_compute_subnetwork.subnetwork.ip_cidr_range + display_name = "VPC" + }, + ] + }, + ] +} + +data "google_client_config" "default" { +} diff --git a/examples/private_zonal_with_networking/outputs.tf b/examples/private_zonal_with_networking/outputs.tf new file mode 100644 index 0000000000..bb255b54a2 --- /dev/null +++ b/examples/private_zonal_with_networking/outputs.tf @@ -0,0 +1,60 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "kubernetes_endpoint" { + description = "The cluster endpoint" + sensitive = true + value = module.gke.endpoint +} + +output "client_token" { + description = "The bearer token for auth" + sensitive = true + value = base64encode(data.google_client_config.default.access_token) +} + +output "ca_certificate" { + description = "The cluster ca certificate (base64 encoded)" + value = module.gke.ca_certificate +} + +output "service_account" { + description = "The default service account used for running nodes." 
+ value = module.gke.service_account +} + +output "cluster_name" { + description = "Cluster name" + value = module.gke.name +} + +output "network_name" { + description = "The name of the VPC being created" + value = module.gcp-network.network_name +} + +output "subnet_name" { + description = "The name of the subnet being created" + value = module.gcp-network.subnets_names +} + +output "subnet_secondary_ranges" { + description = "The secondary ranges associated with the subnet" + value = module.gcp-network.subnets_secondary_ranges +} + + + diff --git a/examples/private_zonal_with_networking/test_outputs.tf b/examples/private_zonal_with_networking/test_outputs.tf new file mode 100644 index 0000000000..a703679105 --- /dev/null +++ b/examples/private_zonal_with_networking/test_outputs.tf @@ -0,0 +1,58 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// These outputs are used to test the module with kitchen-terraform +// They do not need to be included in real-world uses of this module + +output "project_id" { + value = var.project_id +} + +output "region" { + value = module.gke.region +} + +output "network" { + value = var.network +} + +output "subnetwork" { + value = var.subnetwork +} + +output "location" { + value = module.gke.location +} + +output "ip_range_pods_name" { + description = "The secondary IP range used for pods" + value = var.ip_range_pods_name +} + +output "ip_range_services_name" { + description = "The secondary IP range used for services" + value = var.ip_range_services_name +} + +output "zones" { + description = "List of zones in which the cluster resides" + value = module.gke.zones +} + +output "master_kubernetes_version" { + description = "The master Kubernetes version" + value = module.gke.master_version +} diff --git a/examples/private_zonal_with_networking/variables.tf b/examples/private_zonal_with_networking/variables.tf new file mode 100644 index 0000000000..ac2c68d2b6 --- /dev/null +++ b/examples/private_zonal_with_networking/variables.tf @@ -0,0 +1,54 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "cluster_name" { + description = "The name for the GKE cluster" + default = "gke-on-vpc-cluster" +} + +variable "region" { + description = "The region to host the cluster in" + default = "us-central1" +} + +variable "network" { + description = "The VPC network created to host the cluster in" + default = "gke-network" +} + +variable "subnetwork" { + description = "The subnetwork created to host the cluster in" + default = "gke-subnet" +} + +variable "ip_range_pods_name" { + description = "The secondary ip range to use for pods" + default = "ip-range-pods" +} + +variable "ip_range_services_name" { + description = "The secondary ip range to use for pods" + default = "ip-range-scv" +} + +variable "zones" { + default = [] +} + diff --git a/test/ci/private-zonal-with-networking.yml b/test/ci/private-zonal-with-networking.yml new file mode 100644 index 0000000000..f628957a28 --- /dev/null +++ b/test/ci/private-zonal-with-networking.yml @@ -0,0 +1,18 @@ +--- + +platform: linux + +inputs: +- name: pull-request + path: terraform-google-kubernetes-engine + +run: + path: make + args: ['test_integration'] + dir: terraform-google-kubernetes-engine + +params: + SUITE: "private-zonal-with-networking-local" + COMPUTE_ENGINE_SERVICE_ACCOUNT: "" + REGION: "us-east4" + ZONES: '["us-east4-a", "us-east4-b", "us-east4-c"]' diff --git a/test/fixtures/private_zonal_with_networking/example.tf b/test/fixtures/private_zonal_with_networking/example.tf new file mode 100644 index 0000000000..23612f1610 --- /dev/null +++ b/test/fixtures/private_zonal_with_networking/example.tf @@ -0,0 +1,23 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module "example" { + source = "../../../examples/private_zonal_with_networking" + + project_id = var.project_id + region = var.region + zones = var.zones +} diff --git a/test/fixtures/private_zonal_with_networking/outputs.tf b/test/fixtures/private_zonal_with_networking/outputs.tf new file mode 100644 index 0000000000..08f9a8a2e8 --- /dev/null +++ b/test/fixtures/private_zonal_with_networking/outputs.tf @@ -0,0 +1,73 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +output "project_id" { + value = var.project_id +} + +output "location" { + value = module.example.location +} + +output "cluster_name" { + description = "Cluster name" + value = module.example.cluster_name +} + +output "kubernetes_endpoint" { + sensitive = true + value = module.example.kubernetes_endpoint +} + +output "client_token" { + sensitive = true + value = module.example.client_token +} + +output "ca_certificate" { + value = module.example.ca_certificate +} + +output "service_account" { + description = "The default service account used for running nodes." + value = module.example.service_account +} + +output "network_name" { + description = "The name of the VPC being created" + value = module.example.network +} + +output "subnet_name" { + description = "The name of the subnet being created" + value = module.example.subnetwork +} + +output "region" { + description = "The region the cluster is hosted in" + value = module.example.region +} + +output "ip_range_pods_name" { + description = "The secondary range name for pods" + value = module.example.ip_range_pods_name +} + +output "ip_range_services_name" { + description = "The secondary range name for services" + value = module.example.ip_range_services_name +} diff --git a/test/fixtures/private_zonal_with_networking/variables.tf b/test/fixtures/private_zonal_with_networking/variables.tf new file mode 100644 index 0000000000..d610f1e807 --- /dev/null +++ b/test/fixtures/private_zonal_with_networking/variables.tf @@ -0,0 +1,31 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "project_id" { + description = "The project ID to host the cluster in" +} + +variable "region" { + description = "The region to host the cluster in" + default = "us-east4" +} + +variable "zones" { + type = list(string) + description = "The GCP zones to create and test resources in, for applicable tests" + default = ["us-east4-a", "us-east4-b", "us-east4-c"] +} + diff --git a/test/integration/private_zonal_with_networking/controls/gcloud.rb b/test/integration/private_zonal_with_networking/controls/gcloud.rb new file mode 100644 index 0000000000..e998182da2 --- /dev/null +++ b/test/integration/private_zonal_with_networking/controls/gcloud.rb @@ -0,0 +1,180 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project_id = attribute('project_id') +location = attribute('location') +cluster_name = attribute('cluster_name') + +control "gcloud" do + title "Google Compute Engine GKE configuration" + describe command("gcloud --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + + describe "cluster" do + it "is running" do + expect(data['status']).to eq 'RUNNING' + end + + it "is zonal" do + expect(data['location']).to match(/^(.*)[1-9]-[a-z]$/) + end + + it "is single zoned" do + expect(data['locations'].size).to eq 1 + end + + it "uses the private master endpoint" do + expect(data['privateClusterConfig']['enablePrivateEndpoint']).to eq true + end + + it "uses private nodes" do + expect(data['privateClusterConfig']['enablePrivateNodes']).to eq true + end + + it "has the expected addon settings" do + expect(data['addonsConfig']).to eq({ + "horizontalPodAutoscaling" => {}, + "httpLoadBalancing" => {}, + "kubernetesDashboard" => { + "disabled" => true, + }, + "networkPolicyConfig" => { + "disabled" => true, + }, + }) + end + end + + describe "default node pool" do + let(:default_node_pool) { data['nodePools'].select { |p| p['name'] == "default-pool" }.first } + + it "exists" do + expect(data['nodePools']).to include( + including( + "name" => "default-pool", + ) + ) + end + end + + describe "node pool" do + let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } + + it "has autoscaling enabled" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "enabled" => true, + ), + ) + ) + end + + it "has the expected minimum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "minNodeCount" => 1, + ), + ) + ) + end + + it "has the expected maximum node count" do + expect(node_pools).to include( + including( + "autoscaling" => including( + "maxNodeCount" => 100, + ), + ) + ) + end + + it "is the expected machine type" do + expect(node_pools).to include( + including( + "config" => including( + "machineType" => "n1-standard-2", + ), + ) + ) + end + + it "has the expected disk size" do + expect(node_pools).to include( + including( + "config" => including( + "diskSizeGb" => 100, + ), + ) + ) + end + + it "has the expected labels" do + expect(node_pools).to include( + including( + "config" => including( + "labels" => including( + "cluster_name" => cluster_name, + "node_pool" => "default-node-pool", + ), + ), + ) + ) + end + + it "has the expected network tags" do + expect(node_pools).to include( + including( + "config" => including( + "tags" => match_array([ + "gke-#{cluster_name}", + "gke-#{cluster_name}-default-node-pool", + ]), + ), + ) + ) + end + + it "has autorepair enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoRepair" => true, + ), + ) + ) + end + + it "has autoupgrade enabled" do + expect(node_pools).to include( + including( + "management" => including( + "autoUpgrade" => true, + ), + ) + ) + end + end + end +end diff --git a/test/integration/private_zonal_with_networking/controls/network.rb b/test/integration/private_zonal_with_networking/controls/network.rb new file mode 100644 index 0000000000..a17ce74663 --- /dev/null +++ b/test/integration/private_zonal_with_networking/controls/network.rb @@ -0,0 +1,28 @@ +# Copyright 2019 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +project_id = attribute('project_id') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') +control "network" do + title "gcp network configuration" + describe google_compute_network( + project: project_id, + name: network_name + ) do + it { should exist } + its ('subnetworks.count') { should eq 1 } + its ('subnetworks.first') { should match subnet_name } + end + end diff --git a/test/integration/private_zonal_with_networking/controls/subnet.rb b/test/integration/private_zonal_with_networking/controls/subnet.rb new file mode 100644 index 0000000000..f88d46355b --- /dev/null +++ b/test/integration/private_zonal_with_networking/controls/subnet.rb @@ -0,0 +1,46 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +project_id = attribute('project_id') +network_name = attribute('network_name') +subnet_name = attribute('subnet_name') +region = attribute('region') +ip_range_pods_name = attribute('ip_range_pods_name') +ip_range_services_name = attribute('ip_range_services_name') +control "subnet" do + title "gcp subnetwork configuration" + describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + let(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} + end + end + it "#should have the correct secondaryIpRanges configuration for #{ip_range_pods_name}" do + expect(data["secondaryIpRanges"][0]).to include( + "rangeName" => ip_range_pods_name, + "ipCidrRange" => "192.168.0.0/18" + ) + end + it "#should have the correct secondaryIpRanges configuration for #{ip_range_services_name}" do + expect(data["secondaryIpRanges"][1]).to include( + "rangeName" => ip_range_services_name, + "ipCidrRange" => "192.168.64.0/18" + ) + end + end + end diff --git a/test/integration/private_zonal_with_networking/inspec.yml b/test/integration/private_zonal_with_networking/inspec.yml new file mode 100644 index 0000000000..bf2e4e86aa --- /dev/null +++ b/test/integration/private_zonal_with_networking/inspec.yml @@ -0,0 +1,36 @@ +name: simple_regional_with_networking +depends: + - name: inspec-gcp + git: https://github.com/inspec/inspec-gcp.git + tag: v0.10.0 +attributes: + - name: project_id + required: true + type: string + - name: location + required: true + type: string + - name: cluster_name + required: true + type: string + - name: kubernetes_endpoint + required: true + type: string + - name: client_token + required: true + type: string + - name: network_name + required: true + type: string + - name: subnet_name + required: true + type: string + - name: region + required: true + type: string + - name: ip_range_pods_name + required: true + type: string + - name: ip_range_services_name + required: true + type: string From a0a6f66603dc6531d65375fce0152cb7ee861174 Mon Sep 17 00:00:00 2001 From: Sam Gendler Date: Wed, 6 Nov 2019 01:40:10 -0800 Subject: [PATCH 02/20] get gke module to wait on network creation correctly --- examples/private_zonal_with_networking/main.tf | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/private_zonal_with_networking/main.tf b/examples/private_zonal_with_networking/main.tf index fb5d34d757..dc62661017 100644 --- a/examples/private_zonal_with_networking/main.tf +++ b/examples/private_zonal_with_networking/main.tf @@ -44,7 +44,7 @@ module "gcp-network" { } data "google_compute_subnetwork" "subnetwork" { - name = module.gcp-network.subnets_names[0] + name = var.subnetwork project = var.project_id region = var.region depends_on = [module.gcp-network] @@ -57,7 +57,12 @@ module "gke" { regional = false region = var.region zones = slice(var.zones, 0, 1) - network = data.google_compute_subnetwork.subnetwork.network + + // This craziness gets a plain network name from the reference link which is the + // only way to force cluster creation to wait on network creation without a + // depends_on link. 
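  // An alternative, offered here only as a sketch and not used in this example:
  // on Terraform >= 0.12.7 the plain network name could be taken from the self
  // link with a single regex call, e.g.
  //   network = regex("[^/]+$", data.google_compute_subnetwork.subnetwork.network)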
Tests use terraform 0.12.6, which does not have regex or regexall + network = reverse(split("/", data.google_compute_subnetwork.subnetwork.network))[0] + subnetwork = data.google_compute_subnetwork.subnetwork.name ip_range_pods = var.ip_range_pods_name ip_range_services = var.ip_range_services_name From 7954da52d89e344b5b9a4278a53109badd18bdb1 Mon Sep 17 00:00:00 2001 From: Sam Gendler Date: Wed, 6 Nov 2019 04:34:42 -0800 Subject: [PATCH 03/20] Get tests for private_zonal_with_network to function correctly --- .kitchen.yml | 2 +- .../private_zonal_with_networking/outputs.tf | 1 - .../test_outputs.tf | 8 +- .../variables.tf | 10 +-- .../controls/gcloud.rb | 85 +------------------ .../controls/subnet.rb | 49 ++++++----- .../private_zonal_with_networking/inspec.yml | 2 +- 7 files changed, 41 insertions(+), 116 deletions(-) diff --git a/.kitchen.yml b/.kitchen.yml index 7707328091..242d0b27c1 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -77,7 +77,7 @@ suites: backend: local controls: - gcloud - - name: subnet + - name: private_zonal_with_networking backend: local controls: - subnet diff --git a/examples/private_zonal_with_networking/outputs.tf b/examples/private_zonal_with_networking/outputs.tf index bb255b54a2..f306934801 100644 --- a/examples/private_zonal_with_networking/outputs.tf +++ b/examples/private_zonal_with_networking/outputs.tf @@ -57,4 +57,3 @@ output "subnet_secondary_ranges" { } - diff --git a/examples/private_zonal_with_networking/test_outputs.tf b/examples/private_zonal_with_networking/test_outputs.tf index a703679105..e8eeacb9ef 100644 --- a/examples/private_zonal_with_networking/test_outputs.tf +++ b/examples/private_zonal_with_networking/test_outputs.tf @@ -21,10 +21,6 @@ output "project_id" { value = var.project_id } -output "region" { - value = module.gke.region -} - output "network" { value = var.network } @@ -37,6 +33,10 @@ output "location" { value = module.gke.location } +output "region" { + value = var.region +} + output "ip_range_pods_name" { description = "The secondary IP range used for pods" value = var.ip_range_pods_name diff --git a/examples/private_zonal_with_networking/variables.tf b/examples/private_zonal_with_networking/variables.tf index ac2c68d2b6..ecaf86a558 100644 --- a/examples/private_zonal_with_networking/variables.tf +++ b/examples/private_zonal_with_networking/variables.tf @@ -25,7 +25,11 @@ variable "cluster_name" { variable "region" { description = "The region to host the cluster in" - default = "us-central1" +} + +variable "zones" { + type = list(string) + description = "The zone to host the cluster in (required if is a zonal cluster)" } variable "network" { @@ -48,7 +52,3 @@ variable "ip_range_services_name" { default = "ip-range-scv" } -variable "zones" { - default = [] -} - diff --git a/test/integration/private_zonal_with_networking/controls/gcloud.rb b/test/integration/private_zonal_with_networking/controls/gcloud.rb index e998182da2..adaf6fd646 100644 --- a/test/integration/private_zonal_with_networking/controls/gcloud.rb +++ b/test/integration/private_zonal_with_networking/controls/gcloud.rb @@ -75,53 +75,19 @@ ) ) end - end - - describe "node pool" do - let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" } } - - it "has autoscaling enabled" do - expect(node_pools).to include( - including( - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected minimum node count" do - expect(node_pools).to include( - including( - "autoscaling" => including( - "minNodeCount" => 1, - ), - ) 
- ) - end - - it "has the expected maximum node count" do - expect(node_pools).to include( - including( - "autoscaling" => including( - "maxNodeCount" => 100, - ), - ) - ) - end it "is the expected machine type" do - expect(node_pools).to include( + expect(data['nodePools']).to include( including( "config" => including( - "machineType" => "n1-standard-2", + "machineType" => "n1-standard-1", ), ) ) end it "has the expected disk size" do - expect(node_pools).to include( + expect(data['nodePools']).to include( including( "config" => including( "diskSizeGb" => 100, @@ -130,51 +96,6 @@ ) end - it "has the expected labels" do - expect(node_pools).to include( - including( - "config" => including( - "labels" => including( - "cluster_name" => cluster_name, - "node_pool" => "default-node-pool", - ), - ), - ) - ) - end - - it "has the expected network tags" do - expect(node_pools).to include( - including( - "config" => including( - "tags" => match_array([ - "gke-#{cluster_name}", - "gke-#{cluster_name}-default-node-pool", - ]), - ), - ) - ) - end - - it "has autorepair enabled" do - expect(node_pools).to include( - including( - "management" => including( - "autoRepair" => true, - ), - ) - ) - end - - it "has autoupgrade enabled" do - expect(node_pools).to include( - including( - "management" => including( - "autoUpgrade" => true, - ), - ) - ) - end end end end diff --git a/test/integration/private_zonal_with_networking/controls/subnet.rb b/test/integration/private_zonal_with_networking/controls/subnet.rb index f88d46355b..7b967b6c87 100644 --- a/test/integration/private_zonal_with_networking/controls/subnet.rb +++ b/test/integration/private_zonal_with_networking/controls/subnet.rb @@ -18,29 +18,34 @@ region = attribute('region') ip_range_pods_name = attribute('ip_range_pods_name') ip_range_services_name = attribute('ip_range_services_name') + control "subnet" do - title "gcp subnetwork configuration" - describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - let(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - it "#should have the correct secondaryIpRanges configuration for #{ip_range_pods_name}" do - expect(data["secondaryIpRanges"][0]).to include( - "rangeName" => ip_range_pods_name, - "ipCidrRange" => "192.168.0.0/18" - ) - end - it "#should have the correct secondaryIpRanges configuration for #{ip_range_services_name}" do - expect(data["secondaryIpRanges"][1]).to include( - "rangeName" => ip_range_services_name, - "ipCidrRange" => "192.168.64.0/18" - ) + title "gcp subnetwork configuration" + describe command("gcloud compute networks subnets describe #{subnet_name} --project=#{project_id} --region=#{region} --format=json") do + its(:exit_status) { should eq 0 } + its(:stderr) { should eq '' } + + let!(:data) do + if subject.exit_status == 0 + JSON.parse(subject.stdout) + else + {} end end + + it "#should have the correct secondaryIpRanges configuration for #{ip_range_pods_name}" do + expect(data["secondaryIpRanges"][0]).to include( + "rangeName" => ip_range_pods_name, + "ipCidrRange" => "192.168.0.0/18" + ) + end + + it "#should have the correct secondaryIpRanges configuration for #{ip_range_services_name}" do + expect(data["secondaryIpRanges"][1]).to include( + "rangeName" => ip_range_services_name, + "ipCidrRange" => "192.168.64.0/18" + ) + end + end +end diff --git 
a/test/integration/private_zonal_with_networking/inspec.yml b/test/integration/private_zonal_with_networking/inspec.yml index bf2e4e86aa..87f447173f 100644 --- a/test/integration/private_zonal_with_networking/inspec.yml +++ b/test/integration/private_zonal_with_networking/inspec.yml @@ -1,4 +1,4 @@ -name: simple_regional_with_networking +name: private_zonal_with_networking depends: - name: inspec-gcp git: https://github.com/inspec/inspec-gcp.git From 92cd9797b6b47df86776708e0232ff38cdf1108f Mon Sep 17 00:00:00 2001 From: Sam Gendler Date: Wed, 6 Nov 2019 05:09:36 -0800 Subject: [PATCH 04/20] lint fix --- .../private_zonal_with_networking/main.tf | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/examples/private_zonal_with_networking/main.tf b/examples/private_zonal_with_networking/main.tf index dc62661017..82db39a591 100644 --- a/examples/private_zonal_with_networking/main.tf +++ b/examples/private_zonal_with_networking/main.tf @@ -22,9 +22,9 @@ module "gcp-network" { subnets = [ { - subnet_name = var.subnetwork - subnet_ip = "10.0.0.0/17" - subnet_region = var.region + subnet_name = var.subnetwork + subnet_ip = "10.0.0.0/17" + subnet_region = var.region subnet_private_access = "true" }, ] @@ -44,24 +44,24 @@ module "gcp-network" { } data "google_compute_subnetwork" "subnetwork" { - name = var.subnetwork - project = var.project_id - region = var.region + name = var.subnetwork + project = var.project_id + region = var.region depends_on = [module.gcp-network] } module "gke" { - source = "../../modules/beta-private-cluster/" - project_id = var.project_id - name = var.cluster_name - regional = false - region = var.region - zones = slice(var.zones, 0, 1) + source = "../../modules/beta-private-cluster/" + project_id = var.project_id + name = var.cluster_name + regional = false + region = var.region + zones = slice(var.zones, 0, 1) // This craziness gets a plain network name from the reference link which is the // only way to force cluster creation to wait on network creation without a // depends_on link. 
Tests use terraform 0.12.6, which does not have regex or regexall - network = reverse(split("/", data.google_compute_subnetwork.subnetwork.network))[0] + network = reverse(split("/", data.google_compute_subnetwork.subnetwork.network))[0] subnetwork = data.google_compute_subnetwork.subnetwork.name ip_range_pods = var.ip_range_pods_name From b4a8eae532ff4734311ba907d474db53075243f2 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Tue, 19 Nov 2019 20:10:30 -0600 Subject: [PATCH 05/20] add .tmpl ext to autogen tf templates --- autogen/{auth.tf => auth.tf.tmpl} | 0 autogen/{cluster.tf => cluster.tf.tmpl} | 0 autogen/{dns.tf => dns.tf.tmpl} | 0 autogen/{main.tf => main.tf.tmpl} | 0 autogen/{masq.tf => masq.tf.tmpl} | 0 autogen/{networks.tf => networks.tf.tmpl} | 0 autogen/{outputs.tf => outputs.tf.tmpl} | 0 autogen/{sa.tf => sa.tf.tmpl} | 0 autogen/{variables.tf => variables.tf.tmpl} | 0 autogen/{versions.tf => versions.tf.tmpl} | 0 helpers/generate_modules/README.md | 186 +++++++++++++++++++ helpers/generate_modules/generate_modules.py | 6 +- 12 files changed, 190 insertions(+), 2 deletions(-) rename autogen/{auth.tf => auth.tf.tmpl} (100%) rename autogen/{cluster.tf => cluster.tf.tmpl} (100%) rename autogen/{dns.tf => dns.tf.tmpl} (100%) rename autogen/{main.tf => main.tf.tmpl} (100%) rename autogen/{masq.tf => masq.tf.tmpl} (100%) rename autogen/{networks.tf => networks.tf.tmpl} (100%) rename autogen/{outputs.tf => outputs.tf.tmpl} (100%) rename autogen/{sa.tf => sa.tf.tmpl} (100%) rename autogen/{variables.tf => variables.tf.tmpl} (100%) rename autogen/{versions.tf => versions.tf.tmpl} (100%) create mode 100644 helpers/generate_modules/README.md diff --git a/autogen/auth.tf b/autogen/auth.tf.tmpl similarity index 100% rename from autogen/auth.tf rename to autogen/auth.tf.tmpl diff --git a/autogen/cluster.tf b/autogen/cluster.tf.tmpl similarity index 100% rename from autogen/cluster.tf rename to autogen/cluster.tf.tmpl diff --git a/autogen/dns.tf b/autogen/dns.tf.tmpl similarity index 100% rename from autogen/dns.tf rename to autogen/dns.tf.tmpl diff --git a/autogen/main.tf b/autogen/main.tf.tmpl similarity index 100% rename from autogen/main.tf rename to autogen/main.tf.tmpl diff --git a/autogen/masq.tf b/autogen/masq.tf.tmpl similarity index 100% rename from autogen/masq.tf rename to autogen/masq.tf.tmpl diff --git a/autogen/networks.tf b/autogen/networks.tf.tmpl similarity index 100% rename from autogen/networks.tf rename to autogen/networks.tf.tmpl diff --git a/autogen/outputs.tf b/autogen/outputs.tf.tmpl similarity index 100% rename from autogen/outputs.tf rename to autogen/outputs.tf.tmpl diff --git a/autogen/sa.tf b/autogen/sa.tf.tmpl similarity index 100% rename from autogen/sa.tf rename to autogen/sa.tf.tmpl diff --git a/autogen/variables.tf b/autogen/variables.tf.tmpl similarity index 100% rename from autogen/variables.tf rename to autogen/variables.tf.tmpl diff --git a/autogen/versions.tf b/autogen/versions.tf.tmpl similarity index 100% rename from autogen/versions.tf rename to autogen/versions.tf.tmpl diff --git a/helpers/generate_modules/README.md b/helpers/generate_modules/README.md new file mode 100644 index 0000000000..66623aa0c4 --- /dev/null +++ b/helpers/generate_modules/README.md @@ -0,0 +1,186 @@ +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. 
+The resources/services/activations/deletions that this module will create/trigger are: +- Create a GKE cluster with the provided addons +- Create GKE Node Pool(s) with provided configuration and attach to cluster +- Replace the default kube-dns configmap if `stub_domains` are provided +- Activate network policy if `network_policy` is true +- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true + +Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. + + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + +## Usage +There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: + +```hcl +module "gke" { + source = "terraform-google-modules/kubernetes-engine/google" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + preemptible = false + initial_node_count = 80 + }, + ] + + node_pools_oauth_scopes = { + all = [] + + default-node-pool = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = true + } + } + + node_pools_metadata = { + all = {} + + default-node-pool = { + node-pool-metadata-custom-value = "my-node-pool" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = true + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure + +## Upgrade to v3.0.0 + +v3.0.0 is a breaking release. Refer to the +[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. + +## Upgrade to v2.0.0 + +v2.0.0 is a breaking release. Refer to the +[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. + +## Upgrade to v1.0.0 + +Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). 
If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. + +In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. + + + + +## Requirements + +Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: + +1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. +2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). +3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. +4. If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. + +The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. + +### Software Dependencies +#### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +#### Terraform and Plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.12 +- [Terraform Provider for GCP][terraform-provider-google] v2.9 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the +following project roles: +- roles/compute.viewer +- roles/container.clusterAdmin +- roles/container.developer +- roles/iam.serviceAccountAdmin +- roles/iam.serviceAccountUser +- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) + +Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: +- roles/resourcemanager.projectIamAdmin + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: Examples for using this module and sub module. +- /helpers: Helper scripts. +- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). +- /test: Folders with files for testing the module (see Testing section on this file). +- /main.tf: `main` file for the public module, contains all the resources to create. +- /variables.tf: Variables for the public cluster module. +- /output.tf: The outputs for the public cluster module. +- /README.MD: This file. +- /modules: Private and beta sub modules. 
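The Compute Engine and Kubernetes Engine APIs listed under "Enable APIs" above can also be activated from Terraform instead of by hand. A minimal sketch, assuming the target project is identified by a `project_id` variable as in the usage example:

```hcl
resource "google_project_service" "required" {
  for_each = toset([
    "compute.googleapis.com",
    "container.googleapis.com",
  ])

  project            = var.project_id
  service            = each.value
  disable_on_destroy = false
}
```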
+ + +[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md +[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md +[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index b98b8bb69e..f542ddf966 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -82,9 +82,11 @@ def main(argv): lstrip_blocks=True, ) templates = env.list_templates() - for template_file in templates: - for module in MODULES: + for module in MODULES: + for template_file in templates: template = env.get_template(template_file) + if template_file.endswith(".tf.tmpl"): + template_file=template_file.replace(".tf.tmpl",".tf") rendered = template.render( module.template_options(BASE_TEMPLATE_OPTIONS) ) From 25207d3173562c82628c69cff34c6b54fc3fb3f5 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Tue, 19 Nov 2019 20:13:23 -0600 Subject: [PATCH 06/20] remove random file generated during testing --- helpers/generate_modules/README.md | 186 ----------------------------- 1 file changed, 186 deletions(-) delete mode 100644 helpers/generate_modules/README.md diff --git a/helpers/generate_modules/README.md b/helpers/generate_modules/README.md deleted file mode 100644 index 66623aa0c4..0000000000 --- a/helpers/generate_modules/README.md +++ /dev/null @@ -1,186 +0,0 @@ -# Terraform Kubernetes Engine Module - -This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. -The resources/services/activations/deletions that this module will create/trigger are: -- Create a GKE cluster with the provided addons -- Create GKE Node Pool(s) with provided configuration and attach to cluster -- Replace the default kube-dns configmap if `stub_domains` are provided -- Activate network policy if `network_policy` is true -- Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `configure_ip_masq` is true - -Sub modules are provided from creating private clusters, beta private clusters, and beta public clusters as well. Beta sub modules allow for the use of various GKE beta features. See the modules directory for the various sub modules. - - -## Compatibility - -This module is meant for use with Terraform 0.12. If you haven't -[upgraded][terraform-0.12-upgrade] and need a Terraform -0.11.x-compatible version of this module, the last released version -intended for Terraform 0.11.x is [3.0.0]. 
- -## Usage -There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: - -```hcl -module "gke" { - source = "terraform-google-modules/kubernetes-engine/google" - project_id = "" - name = "gke-test-1" - region = "us-central1" - zones = ["us-central1-a", "us-central1-b", "us-central1-f"] - network = "vpc-01" - subnetwork = "us-central1-01" - ip_range_pods = "us-central1-01-gke-01-pods" - ip_range_services = "us-central1-01-gke-01-services" - http_load_balancing = false - horizontal_pod_autoscaling = true - kubernetes_dashboard = true - network_policy = true - - node_pools = [ - { - name = "default-node-pool" - machine_type = "n1-standard-2" - min_count = 1 - max_count = 100 - disk_size_gb = 100 - disk_type = "pd-standard" - image_type = "COS" - auto_repair = true - auto_upgrade = true - service_account = "project-service-account@.iam.gserviceaccount.com" - preemptible = false - initial_node_count = 80 - }, - ] - - node_pools_oauth_scopes = { - all = [] - - default-node-pool = [ - "https://www.googleapis.com/auth/cloud-platform", - ] - } - - node_pools_labels = { - all = {} - - default-node-pool = { - default-node-pool = true - } - } - - node_pools_metadata = { - all = {} - - default-node-pool = { - node-pool-metadata-custom-value = "my-node-pool" - } - } - - node_pools_taints = { - all = [] - - default-node-pool = [ - { - key = "default-node-pool" - value = true - effect = "PREFER_NO_SCHEDULE" - }, - ] - } - - node_pools_tags = { - all = [] - - default-node-pool = [ - "default-node-pool", - ] - } -} -``` - - -Then perform the following commands on the root folder: - -- `terraform init` to get the plugins -- `terraform plan` to see the infrastructure plan -- `terraform apply` to apply the infrastructure build -- `terraform destroy` to destroy the built infrastructure - -## Upgrade to v3.0.0 - -v3.0.0 is a breaking release. Refer to the -[Upgrading to v3.0 guide][upgrading-to-v3.0] for details. - -## Upgrade to v2.0.0 - -v2.0.0 is a breaking release. Refer to the -[Upgrading to v2.0 guide][upgrading-to-v2.0] for details. - -## Upgrade to v1.0.0 - -Version 1.0.0 of this module introduces a breaking change: adding the `disable-legacy-endpoints` metadata field to all node pools. This metadata is required by GKE and [determines whether the `/0.1/` and `/v1beta1/` paths are available in the nodes' metadata server](https://cloud.google.com/kubernetes-engine/docs/how-to/protecting-cluster-metadata#disable-legacy-apis). If your applications do not require access to the node's metadata server, you can leave the default value of `true` provided by the module. If your applications require access to the metadata server, be sure to read the linked documentation to see if you need to set the value for this field to `false` to allow your applications access to the above metadata server paths. - -In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. - - - - -## Requirements - -Before this module can be used on a project, you must ensure that the following pre-requisites are fulfilled: - -1. Terraform and kubectl are [installed](#software-dependencies) on the machine where Terraform is executed. -2. The Service Account you execute the module with has the right [permissions](#configure-a-service-account). -3. The Compute Engine and Kubernetes Engine APIs are [active](#enable-apis) on the project you will launch the cluster in. -4. 
If you are using a Shared VPC, the APIs must also be activated on the Shared VPC host project and your service account needs the proper permissions there. - -The [project factory](https://github.com/terraform-google-modules/terraform-google-project-factory) can be used to provision projects with the correct APIs active and the necessary Shared VPC connections. - -### Software Dependencies -#### Kubectl -- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x -#### Terraform and Plugins -- [Terraform](https://www.terraform.io/downloads.html) 0.12 -- [Terraform Provider for GCP][terraform-provider-google] v2.9 - -### Configure a Service Account -In order to execute this module you must have a Service Account with the -following project roles: -- roles/compute.viewer -- roles/container.clusterAdmin -- roles/container.developer -- roles/iam.serviceAccountAdmin -- roles/iam.serviceAccountUser -- roles/resourcemanager.projectIamAdmin (only required if `service_account` is set to `create`) - -Additionally, if `service_account` is set to `create` and `grant_registry_access` is requested, the service account requires the following role on the `registry_project_id` project: -- roles/resourcemanager.projectIamAdmin - -### Enable APIs -In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: - -- Compute Engine API - compute.googleapis.com -- Kubernetes Engine API - container.googleapis.com - -## File structure -The project has the following folders and files: - -- /: root folder -- /examples: Examples for using this module and sub module. -- /helpers: Helper scripts. -- /scripts: Scripts for specific tasks on module (see Infrastructure section on this file). -- /test: Folders with files for testing the module (see Testing section on this file). -- /main.tf: `main` file for the public module, contains all the resources to create. -- /variables.tf: Variables for the public cluster module. -- /output.tf: The outputs for the public cluster module. -- /README.MD: This file. -- /modules: Private and beta sub modules. - - -[upgrading-to-v2.0]: docs/upgrading_to_v2.0.md -[upgrading-to-v3.0]: docs/upgrading_to_v3.0.md -[terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google -[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 -[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html From d94749d2a9472893fc06f157b10d5d391a4c04c2 Mon Sep 17 00:00:00 2001 From: bharathkkb Date: Tue, 19 Nov 2019 20:18:17 -0600 Subject: [PATCH 07/20] pylint --- helpers/generate_modules/generate_modules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/generate_modules/generate_modules.py b/helpers/generate_modules/generate_modules.py index f542ddf966..a61329caf8 100755 --- a/helpers/generate_modules/generate_modules.py +++ b/helpers/generate_modules/generate_modules.py @@ -86,7 +86,7 @@ def main(argv): for template_file in templates: template = env.get_template(template_file) if template_file.endswith(".tf.tmpl"): - template_file=template_file.replace(".tf.tmpl",".tf") + template_file = template_file.replace(".tf.tmpl", ".tf") rendered = template.render( module.template_options(BASE_TEMPLATE_OPTIONS) ) From 464a5956de5cb993f44cb20945f70c24234ceb20 Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Wed, 20 Nov 2019 14:38:35 +0800 Subject: [PATCH 08/20] Removed kubernetes dashboard add config. 
The kubernetes addon has been removed by GKE --- modules/private-cluster/cluster.tf | 4 ---- 1 file changed, 4 deletions(-) diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index c8051255bf..249baa28f6 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -79,10 +79,6 @@ resource "google_container_cluster" "primary" { disabled = ! var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! var.network_policy } From 562f83d6deb568ca0dc08d34a50f5bee90135e5a Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Wed, 20 Nov 2019 15:10:24 +0800 Subject: [PATCH 09/20] Update outputs.tf --- modules/private-cluster/outputs.tf | 5 ----- 1 file changed, 5 deletions(-) diff --git a/modules/private-cluster/outputs.tf b/modules/private-cluster/outputs.tf index dea7b5c7b5..54080bfa21 100644 --- a/modules/private-cluster/outputs.tf +++ b/modules/private-cluster/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names From b5358e44c4cdfdd85fd71b7f960c0f7db1ec1b66 Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Wed, 20 Nov 2019 15:11:44 +0800 Subject: [PATCH 10/20] Update main.tf --- modules/private-cluster/main.tf | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index f0d307311c..01af2436ef 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -80,8 +80,7 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled - + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) @@ -105,7 +104,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled } /****************************************** From fb1aba2411ec4064393fa11cbffee02c3216d714 Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Wed, 20 Nov 2019 16:37:46 +0800 Subject: [PATCH 11/20] Updated autogen to remove the inclusion of kubernetes dashboard addon. 
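GKE no longer offers the Kubernetes dashboard addon, so the autogen templates drop the `kubernetes_dashboard` block from `addons_config` along with the matching local and output, mirroring the changes already applied to `modules/private-cluster` above. A minimal sketch of the `addons_config` block the non-beta templates generate after this change, assuming the `http_load_balancing` entry keeps the same `disabled = ! var.*` pattern as the other addons shown in the diff:

```hcl
addons_config {
  http_load_balancing {
    disabled = ! var.http_load_balancing
  }

  horizontal_pod_autoscaling {
    disabled = ! var.horizontal_pod_autoscaling
  }

  network_policy_config {
    disabled = ! var.network_policy
  }
}
```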
--- autogen/cluster.tf | 4 ---- autogen/main.tf | 2 -- autogen/outputs.tf | 5 ----- 3 files changed, 11 deletions(-) diff --git a/autogen/cluster.tf b/autogen/cluster.tf index 4791220591..1b8a585cd4 100644 --- a/autogen/cluster.tf +++ b/autogen/cluster.tf @@ -119,10 +119,6 @@ resource "google_container_cluster" "primary" { disabled = ! var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! var.network_policy } diff --git a/autogen/main.tf b/autogen/main.tf index 841444ea44..3316d8eb75 100644 --- a/autogen/main.tf +++ b/autogen/main.tf @@ -105,7 +105,6 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled {% if beta_cluster %} # BETA features @@ -139,7 +138,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled {% if beta_cluster %} # BETA features cluster_istio_enabled = ! local.cluster_output_istio_disabled diff --git a/autogen/outputs.tf b/autogen/outputs.tf index 842502ecea..44222bee6e 100644 --- a/autogen/outputs.tf +++ b/autogen/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names From e14251f9a8da6f8a9fe877fa6f0456fd8d8f8c43 Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Wed, 20 Nov 2019 16:39:29 +0800 Subject: [PATCH 12/20] Removed kubernets dashboard addon from all modules. --- modules/beta-private-cluster-update-variant/cluster.tf | 4 ---- modules/beta-private-cluster-update-variant/main.tf | 2 -- modules/beta-private-cluster-update-variant/outputs.tf | 5 ----- modules/beta-private-cluster/cluster.tf | 4 ---- modules/beta-private-cluster/main.tf | 2 -- modules/beta-private-cluster/outputs.tf | 5 ----- modules/beta-public-cluster/cluster.tf | 4 ---- modules/beta-public-cluster/main.tf | 2 -- modules/beta-public-cluster/outputs.tf | 5 ----- modules/private-cluster-update-variant/cluster.tf | 4 ---- modules/private-cluster-update-variant/main.tf | 2 -- modules/private-cluster-update-variant/outputs.tf | 5 ----- 12 files changed, 44 deletions(-) diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 10c206f35c..32bdbf1527 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -111,10 +111,6 @@ resource "google_container_cluster" "primary" { disabled = ! 
var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! var.network_policy } diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index 5b235ce00f..1632296df3 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -93,7 +93,6 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false @@ -125,7 +124,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun diff --git a/modules/beta-private-cluster-update-variant/outputs.tf b/modules/beta-private-cluster-update-variant/outputs.tf index fb3f29c401..947c806067 100644 --- a/modules/beta-private-cluster-update-variant/outputs.tf +++ b/modules/beta-private-cluster-update-variant/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index c363dacf9f..b6f91a38d8 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -111,10 +111,6 @@ resource "google_container_cluster" "primary" { disabled = ! var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! 
var.network_policy } diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 5b235ce00f..1632296df3 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -93,7 +93,6 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false @@ -125,7 +124,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index fb3f29c401..947c806067 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 1f5eee84aa..9c966c3534 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -111,10 +111,6 @@ resource "google_container_cluster" "primary" { disabled = ! var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! 
var.network_policy } diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index 9668b6f1ea..0dcce31340 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -93,7 +93,6 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled # BETA features cluster_output_istio_disabled = google_container_cluster.primary.addons_config.0.istio_config != null && length(google_container_cluster.primary.addons_config.0.istio_config) == 1 ? google_container_cluster.primary.addons_config.0.istio_config.0.disabled : false @@ -125,7 +124,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled # BETA features cluster_istio_enabled = ! local.cluster_output_istio_disabled cluster_cloudrun_enabled = var.cloudrun diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index fb3f29c401..947c806067 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index d7fc2dd736..967c02337b 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -79,10 +79,6 @@ resource "google_container_cluster" "primary" { disabled = ! var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! 
var.network_policy } diff --git a/modules/private-cluster-update-variant/main.tf b/modules/private-cluster-update-variant/main.tf index f0d307311c..7826dfff18 100644 --- a/modules/private-cluster-update-variant/main.tf +++ b/modules/private-cluster-update-variant/main.tf @@ -80,7 +80,6 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) @@ -105,7 +104,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled } /****************************************** diff --git a/modules/private-cluster-update-variant/outputs.tf b/modules/private-cluster-update-variant/outputs.tf index dea7b5c7b5..54080bfa21 100644 --- a/modules/private-cluster-update-variant/outputs.tf +++ b/modules/private-cluster-update-variant/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names From 5a2d1e120f0d85a8bd2427a4493d92ac1bd31b0a Mon Sep 17 00:00:00 2001 From: Jan Toebes Date: Wed, 20 Nov 2019 12:37:38 +0100 Subject: [PATCH 13/20] Add dependency to 'wait for resource' Add dependency so helm is not going to access the cluster before it is up and running --- autogen/outputs.tf.tmpl | 1 + 1 file changed, 1 insertion(+) diff --git a/autogen/outputs.tf.tmpl b/autogen/outputs.tf.tmpl index 842502ecea..22fe89862b 100644 --- a/autogen/outputs.tf.tmpl +++ b/autogen/outputs.tf.tmpl @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + null_resource.wait_for_cluster.id, ] } From 6aae94a9558b9b865d82c6bbc02a1ce8ad5bc060 Mon Sep 17 00:00:00 2001 From: Jan Toebes Date: Wed, 20 Nov 2019 12:40:41 +0100 Subject: [PATCH 14/20] Add dependency to 'wait for resource' --- modules/beta-private-cluster-update-variant/outputs.tf | 1 + modules/beta-private-cluster/outputs.tf | 1 + modules/beta-public-cluster/outputs.tf | 1 + modules/private-cluster-update-variant/outputs.tf | 1 + modules/private-cluster/outputs.tf | 1 + outputs.tf | 1 + 6 files changed, 6 insertions(+) diff --git a/modules/beta-private-cluster-update-variant/outputs.tf b/modules/beta-private-cluster-update-variant/outputs.tf index fb3f29c401..e4a98aa447 100644 --- a/modules/beta-private-cluster-update-variant/outputs.tf +++ b/modules/beta-private-cluster-update-variant/outputs.tf @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + 
null_resource.wait_for_cluster.id, ] } diff --git a/modules/beta-private-cluster/outputs.tf b/modules/beta-private-cluster/outputs.tf index fb3f29c401..e4a98aa447 100644 --- a/modules/beta-private-cluster/outputs.tf +++ b/modules/beta-private-cluster/outputs.tf @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + null_resource.wait_for_cluster.id, ] } diff --git a/modules/beta-public-cluster/outputs.tf b/modules/beta-public-cluster/outputs.tf index fb3f29c401..e4a98aa447 100644 --- a/modules/beta-public-cluster/outputs.tf +++ b/modules/beta-public-cluster/outputs.tf @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + null_resource.wait_for_cluster.id, ] } diff --git a/modules/private-cluster-update-variant/outputs.tf b/modules/private-cluster-update-variant/outputs.tf index dea7b5c7b5..3d6e9c8dab 100644 --- a/modules/private-cluster-update-variant/outputs.tf +++ b/modules/private-cluster-update-variant/outputs.tf @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + null_resource.wait_for_cluster.id, ] } diff --git a/modules/private-cluster/outputs.tf b/modules/private-cluster/outputs.tf index dea7b5c7b5..3d6e9c8dab 100644 --- a/modules/private-cluster/outputs.tf +++ b/modules/private-cluster/outputs.tf @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + null_resource.wait_for_cluster.id, ] } diff --git a/outputs.tf b/outputs.tf index dea7b5c7b5..3d6e9c8dab 100644 --- a/outputs.tf +++ b/outputs.tf @@ -54,6 +54,7 @@ output "endpoint" { */ google_container_cluster.primary, google_container_node_pool.pools, + null_resource.wait_for_cluster.id, ] } From 4f018c248934925a9206789ada9a52142d7d3547 Mon Sep 17 00:00:00 2001 From: Nicolas Bazire Date: Tue, 27 Aug 2019 11:24:22 +0200 Subject: [PATCH 15/20] Add support for local_ssd_count in node config. --- CHANGELOG.md | 2 ++ README.md | 1 + autogen/README.md | 1 + autogen/cluster.tf.tmpl | 6 ++++-- cluster.tf | 6 ++++-- examples/node_pool/main.tf | 1 + modules/beta-private-cluster-update-variant/README.md | 1 + modules/beta-private-cluster-update-variant/cluster.tf | 6 ++++-- modules/beta-private-cluster/README.md | 1 + modules/beta-private-cluster/cluster.tf | 6 ++++-- modules/beta-public-cluster/README.md | 1 + modules/beta-public-cluster/cluster.tf | 6 ++++-- modules/private-cluster-update-variant/README.md | 1 + modules/private-cluster-update-variant/cluster.tf | 6 ++++-- modules/private-cluster/README.md | 1 + modules/private-cluster/cluster.tf | 6 ++++-- 16 files changed, 38 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7e738732c..9b9d87223a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Extending the adopted spec, each change should have a link to its corresponding * Support for setting node_locations on node pools. [#303] * Fix for specifying `node_count` on node pools when autoscaling is disabled. [#311] * Added submodule for installing Anthos Config Management. [#268] +* Support for `local_ssd_count` in node pool configuration. 
[#244] ## [v5.1.1] - 2019-10-25 @@ -245,6 +246,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [#238]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/238 [#241]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/241 [#250]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/250 +[#244]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/244 [#236]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/236 [#217]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/217 [#234]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/234 diff --git a/README.md b/README.md index 15f6aff13b..1068fc7f30 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,7 @@ module "gke" { machine_type = "n1-standard-2" min_count = 1 max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/autogen/README.md b/autogen/README.md index 3efe785ff0..4165c9b8d8 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -60,6 +60,7 @@ module "gke" { {% endif %} min_count = 1 max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/autogen/cluster.tf.tmpl b/autogen/cluster.tf.tmpl index 4791220591..27448db32d 100644 --- a/autogen/cluster.tf.tmpl +++ b/autogen/cluster.tf.tmpl @@ -384,8 +384,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", diff --git a/cluster.tf b/cluster.tf index 072a60fb14..07801f226a 100644 --- a/cluster.tf +++ b/cluster.tf @@ -185,8 +185,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 5bc0f53407..1ae41a53bb 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -50,6 +50,7 @@ module "gke" { machine_type = "n1-standard-2" min_count = 1 max_count = 2 + local_ssd_count = 0 disk_size_gb = 30 disk_type = "pd-standard" accelerator_count = 1 diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index d2b5197726..51efdb5467 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -51,6 +51,7 @@ module "gke" { node_locations = "us-central1-b,us-central1-c" min_count = 1 
max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 10c206f35c..8cc2311055 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -352,8 +352,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 96ad9abf5a..71e281d32e 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -51,6 +51,7 @@ module "gke" { node_locations = "us-central1-b,us-central1-c" min_count = 1 max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index c363dacf9f..95b78225e7 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -280,8 +280,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 7fcf78cccc..5052b306a9 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -46,6 +46,7 @@ module "gke" { node_locations = "us-central1-b,us-central1-c" min_count = 1 max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 1f5eee84aa..638d65af9d 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -275,8 +275,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 
fa9cdb8852..f73d070cb1 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -48,6 +48,7 @@ module "gke" { machine_type = "n1-standard-2" min_count = 1 max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index d7fc2dd736..af422abb2d 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -262,8 +262,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 5465544b82..d23f84b68b 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -48,6 +48,7 @@ module "gke" { machine_type = "n1-standard-2" min_count = 1 max_count = 100 + local_ssd_count = 0 disk_size_gb = 100 disk_type = "pd-standard" image_type = "COS" diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index c8051255bf..4567530763 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -190,8 +190,10 @@ resource "google_container_node_pool" "pools" { var.node_pools_tags[var.node_pools[count.index]["name"]], ) - disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) - disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( var.node_pools[count.index], "service_account", From 77380cb9674fec023885c63b8d1f7c3e9b916ac2 Mon Sep 17 00:00:00 2001 From: Morgante Pell Date: Wed, 20 Nov 2019 12:23:14 +0000 Subject: [PATCH 16/20] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b9d87223a..85f598f641 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Extending the adopted spec, each change should have a link to its corresponding * Fix for specifying `node_count` on node pools when autoscaling is disabled. [#311] * Added submodule for installing Anthos Config Management. [#268] * Support for `local_ssd_count` in node pool configuration. [#244] +* Wait for cluster to be ready before returning endpoint. 
[#340] ## [v5.1.1] - 2019-10-25 @@ -229,6 +230,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0 +[#340]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/340 [#268]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/268 [#311]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/311 [#303]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/303 From 1eae078625eaf9d2b5c5b3bebcaa5c6764ecff13 Mon Sep 17 00:00:00 2001 From: Pavel Zhukov Date: Wed, 13 Nov 2019 18:40:56 +0200 Subject: [PATCH 17/20] Make node_pools_* params optional and allow pool-specific overrides. [test/deploy_service] Add retry logic to wait until the nginx service becomes reachable [build] Add 120 seconds delay after the 'prepare' step --- Makefile | 2 +- autogen/cluster.tf.tmpl | 61 +++++++-------- autogen/main.tf.tmpl | 3 +- autogen/variables.tf.tmpl | 25 ++++--- autogen/variables_defaults.tf | 74 +++++++++++++++++++ build/int.cloudbuild.yaml | 4 +- build/lint.cloudbuild.yaml | 2 +- cluster.tf | 33 +++++---- examples/node_pool/main.tf | 16 ---- .../cluster.tf | 63 ++++++++-------- .../main.tf | 1 + .../variables.tf | 7 ++ .../variables_defaults.tf | 72 ++++++++++++++++++ modules/beta-private-cluster/cluster.tf | 37 +++++----- modules/beta-private-cluster/main.tf | 1 + modules/beta-private-cluster/variables.tf | 7 ++ .../variables_defaults.tf | 72 ++++++++++++++++++ modules/beta-public-cluster/cluster.tf | 37 +++++----- modules/beta-public-cluster/main.tf | 1 + modules/beta-public-cluster/variables.tf | 7 ++ .../beta-public-cluster/variables_defaults.tf | 72 ++++++++++++++++++ .../private-cluster-update-variant/cluster.tf | 59 ++++++++------- .../variables.tf | 4 + .../variables_defaults.tf | 62 ++++++++++++++++ modules/private-cluster/cluster.tf | 33 +++++---- modules/private-cluster/variables.tf | 4 + modules/private-cluster/variables_defaults.tf | 62 ++++++++++++++++ .../deploy_service/controls/kubectl.rb | 6 ++ variables.tf | 4 + variables_defaults.tf | 62 ++++++++++++++++ 30 files changed, 712 insertions(+), 181 deletions(-) create mode 100644 autogen/variables_defaults.tf create mode 100644 modules/beta-private-cluster-update-variant/variables_defaults.tf create mode 100644 modules/beta-private-cluster/variables_defaults.tf create mode 100644 modules/beta-public-cluster/variables_defaults.tf create mode 100644 modules/private-cluster-update-variant/variables_defaults.tf create mode 100644 modules/private-cluster/variables_defaults.tf create mode 100644 variables_defaults.tf diff --git a/Makefile b/Makefile index 736cad34ce..09dc66079c 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ # Make will use bash instead of sh SHELL := /usr/bin/env bash -DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0.4.6 +DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 0 DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools REGISTRY_URL := gcr.io/cloud-foundation-cicd diff --git a/autogen/cluster.tf.tmpl b/autogen/cluster.tf.tmpl index 27448db32d..77b3b5b84e 100644 --- a/autogen/cluster.tf.tmpl +++ b/autogen/cluster.tf.tmpl @@ -252,10 +252,10 @@ resource "random_id" "name" { labels = join(",", sort( concat( - keys(var.node_pools_labels["all"]), - 
values(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + keys(local.node_pools_labels["all"]), + values(local.node_pools_labels["all"]), + keys(local.node_pools_labels[var.node_pools[count.index]["name"]]), + values(local.node_pools_labels[var.node_pools[count.index]["name"]]) ) ) ) @@ -264,10 +264,10 @@ resource "random_id" "name" { metadata = join(",", sort( concat( - keys(var.node_pools_metadata["all"]), - values(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + keys(local.node_pools_metadata["all"]), + values(local.node_pools_metadata["all"]), + keys(local.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(local.node_pools_metadata[var.node_pools[count.index]["name"]]) ) ) ) @@ -276,8 +276,8 @@ resource "random_id" "name" { oauth_scopes = join(",", sort( concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) ) ) @@ -286,8 +286,8 @@ resource "random_id" "name" { tags = join(",", sort( concat( - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]] ) ) ) @@ -314,7 +314,9 @@ resource "google_container_node_pool" "pools" { // use node_locations if provided, defaults to cluster level node_locations if not specified node_locations = lookup(var.node_pools[count.index], "node_locations", "") != "" ? split(",", var.node_pools[count.index]["node_locations"]) : null {% endif %} - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( var.node_pools[count.index], "version", @@ -350,16 +352,16 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? 
{ "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, @@ -367,8 +369,8 @@ resource "google_container_node_pool" "pools" { {% if beta_cluster %} dynamic "taint" { for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], + local.node_pools_taints["all"], + local.node_pools_taints[var.node_pools[count.index]["name"]], ) content { effect = taint.value.effect @@ -378,10 +380,10 @@ resource "google_container_node_pool" "pools" { } {% endif %} tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -396,8 +398,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -431,6 +433,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + {% if update_variant %} create_before_destroy = true {% endif %} diff --git a/autogen/main.tf.tmpl b/autogen/main.tf.tmpl index 841444ea44..15d6d3983d 100644 --- a/autogen/main.tf.tmpl +++ b/autogen/main.tf.tmpl @@ -147,7 +147,8 @@ locals { cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled - cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ + + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ identity_namespace = var.identity_namespace }] # /BETA features diff --git a/autogen/variables.tf.tmpl b/autogen/variables.tf.tmpl index ad5cc44e34..e58f2ef487 100644 --- a/autogen/variables.tf.tmpl +++ b/autogen/variables.tf.tmpl @@ -79,7 +79,7 @@ variable "node_version" { } variable "master_authorized_networks_config" { - type = list(object({cidr_blocks = list(object({cidr_block = string, display_name = string}))})) + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. 
Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." default = [] } @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -181,9 +183,10 @@ variable "node_pools_metadata" { {% if beta_cluster %} variable "node_pools_taints" { - type = map(list(object({key=string,value=string,effect=string}))) + type = map(list(object({ key = string, value = string, effect = string }))) description = "Map of lists containing node taints by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -195,6 +198,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -205,6 +209,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] @@ -356,10 +361,11 @@ variable "default_max_pods_per_node" { variable "database_encryption" { description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." - type = list(object({state = string, key_name = string})) - default = [{ - state = "DECRYPTED" - key_name = "" + type = list(object({ state = string, key_name = string })) + + default = [{ + state = "DECRYPTED" + key_name = "" }] } @@ -375,7 +381,8 @@ variable "enable_binary_authorization" { variable "pod_security_policy_config" { description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." - default = [{ + + default = [{ "enabled" = false }] } @@ -429,8 +436,8 @@ variable "release_channel" { } variable "enable_shielded_nodes" { - type = bool + type = bool description = "Enable Shielded Nodes features on all nodes in this cluster" - default = false + default = false } {% endif %} diff --git a/autogen/variables_defaults.tf b/autogen/variables_defaults.tf new file mode 100644 index 0000000000..ccc9b0eed3 --- /dev/null +++ b/autogen/variables_defaults.tf @@ -0,0 +1,74 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +{{ autogeneration_note }} + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + +{% if beta_cluster %} + node_pools_taints = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_taints + ) + +{% endif %} + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index e78d0eb2ba..af0560641f 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -19,7 +19,7 @@ steps: args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && download_acm'] - id: prepare name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment'] + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && prepare_environment && sleep 120'] env: - 'TF_VAR_org_id=$_ORG_ID' - 'TF_VAR_folder_id=$_FOLDER_ID' @@ -309,6 +309,6 @@ tags: - 'integration' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.5.4' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0' options: machineType: 'N1_HIGHCPU_8' diff --git a/build/lint.cloudbuild.yaml b/build/lint.cloudbuild.yaml index 02b9e5327b..82c43b9fa6 100644 --- a/build/lint.cloudbuild.yaml +++ b/build/lint.cloudbuild.yaml @@ -24,4 +24,4 @@ tags: - 'lint' substitutions: _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools' - _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0.5.4' + _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '0' diff --git a/cluster.tf b/cluster.tf index 07801f226a..73d17fc26a 100644 --- a/cluster.tf +++ b/cluster.tf @@ -131,7 +131,9 @@ resource "google_container_node_pool" "pools" { name = var.node_pools[count.index]["name"] project = var.project_id location = local.location - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( var.node_pools[count.index], "version", @@ -164,25 +166,25 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? 
{ "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -197,8 +199,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -214,6 +216,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + } timeouts { diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 1ae41a53bb..b2f1d010ed 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -72,20 +72,10 @@ module "gke" { }, ] - node_pools_oauth_scopes = { - all = [] - pool-01 = [] - pool-02 = [] - pool-03 = [] - } - node_pools_metadata = { - all = {} pool-01 = { shutdown-script = file("${path.module}/data/shutdown-script.sh") } - pool-02 = {} - pool-03 = {} } node_pools_labels = { @@ -95,8 +85,6 @@ module "gke" { pool-01 = { pool-01-example = true } - pool-02 = {} - pool-03 = {} } node_pools_taints = { @@ -114,8 +102,6 @@ module "gke" { effect = "PREFER_NO_SCHEDULE" }, ] - pool-02 = [] - pool-03 = [] } node_pools_tags = { @@ -125,8 +111,6 @@ module "gke" { pool-01 = [ "pool-01-example", ] - pool-02 = [] - pool-03 = [] } } diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 8cc2311055..93e7072f31 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -235,10 +235,10 @@ resource "random_id" "name" { labels = join(",", sort( concat( - keys(var.node_pools_labels["all"]), - values(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + keys(local.node_pools_labels["all"]), + values(local.node_pools_labels["all"]), + keys(local.node_pools_labels[var.node_pools[count.index]["name"]]), + values(local.node_pools_labels[var.node_pools[count.index]["name"]]) ) ) ) @@ -247,10 +247,10 @@ resource "random_id" "name" { metadata = join(",", sort( concat( - keys(var.node_pools_metadata["all"]), - values(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + keys(local.node_pools_metadata["all"]), + values(local.node_pools_metadata["all"]), + keys(local.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(local.node_pools_metadata[var.node_pools[count.index]["name"]]) ) ) ) @@ -259,8 +259,8 @@ resource "random_id" "name" { oauth_scopes = join(",", sort( concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) ) ) @@ -269,8 +269,8 @@ resource "random_id" "name" { tags = join(",", sort( concat( - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]] ) ) ) @@ -286,7 +286,9 @@ 
resource "google_container_node_pool" "pools" { location = local.location // use node_locations if provided, defaults to cluster level node_locations if not specified node_locations = lookup(var.node_pools[count.index], "node_locations", "") != "" ? split(",", var.node_pools[count.index]["node_locations"]) : null - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( var.node_pools[count.index], "version", @@ -320,24 +322,24 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) dynamic "taint" { for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], + local.node_pools_taints["all"], + local.node_pools_taints[var.node_pools[count.index]["name"]], ) content { effect = taint.value.effect @@ -346,10 +348,10 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -364,8 +366,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -396,7 +398,8 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] + create_before_destroy = true } diff --git a/modules/beta-private-cluster-update-variant/main.tf b/modules/beta-private-cluster-update-variant/main.tf index 5b235ce00f..bc466df2b2 100644 --- a/modules/beta-private-cluster-update-variant/main.tf +++ b/modules/beta-private-cluster-update-variant/main.tf @@ -132,6 +132,7 @@ locals { cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ identity_namespace = var.identity_namespace }] diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index d04ed5ac1f..65335a674c 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -183,6 +185,7 @@ variable "node_pools_taints" { type = map(list(object({ key = string, value = string, effect = string }))) description = "Map of lists containing node taints by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -193,6 +196,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -203,6 +207,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] @@ -352,6 +357,7 @@ variable "default_max_pods_per_node" { variable "database_encryption" { description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." 
type = list(object({ state = string, key_name = string })) + default = [{ state = "DECRYPTED" key_name = "" @@ -370,6 +376,7 @@ variable "enable_binary_authorization" { variable "pod_security_policy_config" { description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ "enabled" = false }] diff --git a/modules/beta-private-cluster-update-variant/variables_defaults.tf b/modules/beta-private-cluster-update-variant/variables_defaults.tf new file mode 100644 index 0000000000..9a75764306 --- /dev/null +++ b/modules/beta-private-cluster-update-variant/variables_defaults.tf @@ -0,0 +1,72 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + + node_pools_taints = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_taints + ) + + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 95b78225e7..e5f4e4510d 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -214,7 +214,9 @@ resource "google_container_node_pool" "pools" { location = local.location // use node_locations if provided, defaults to cluster level node_locations if not specified node_locations = lookup(var.node_pools[count.index], "node_locations", "") != "" ? split(",", var.node_pools[count.index]["node_locations"]) : null - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( var.node_pools[count.index], "version", @@ -248,24 +250,24 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) dynamic "taint" { for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], + local.node_pools_taints["all"], + local.node_pools_taints[var.node_pools[count.index]["name"]], ) content { effect = taint.value.effect @@ -274,10 +276,10 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -292,8 +294,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -325,6 +327,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + } timeouts { diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 5b235ce00f..bc466df2b2 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -132,6 +132,7 @@ locals { cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ identity_namespace = var.identity_namespace }] diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index d04ed5ac1f..65335a674c 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -183,6 +185,7 @@ variable "node_pools_taints" { type = map(list(object({ key = string, value = string, effect = string }))) description = "Map of lists containing node taints by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -193,6 +196,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -203,6 +207,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] @@ -352,6 +357,7 @@ variable "default_max_pods_per_node" { variable "database_encryption" { description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." 
type = list(object({ state = string, key_name = string })) + default = [{ state = "DECRYPTED" key_name = "" @@ -370,6 +376,7 @@ variable "enable_binary_authorization" { variable "pod_security_policy_config" { description = "enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ "enabled" = false }] diff --git a/modules/beta-private-cluster/variables_defaults.tf b/modules/beta-private-cluster/variables_defaults.tf new file mode 100644 index 0000000000..9a75764306 --- /dev/null +++ b/modules/beta-private-cluster/variables_defaults.tf @@ -0,0 +1,72 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + + node_pools_taints = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_taints + ) + + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index 638d65af9d..72b513cd57 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -209,7 +209,9 @@ resource "google_container_node_pool" "pools" { location = local.location // use node_locations if provided, defaults to cluster level node_locations if not specified node_locations = lookup(var.node_pools[count.index], "node_locations", "") != "" ? split(",", var.node_pools[count.index]["node_locations"]) : null - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( var.node_pools[count.index], "version", @@ -243,24 +245,24 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) dynamic "taint" { for_each = concat( - var.node_pools_taints["all"], - var.node_pools_taints[var.node_pools[count.index]["name"]], + local.node_pools_taints["all"], + local.node_pools_taints[var.node_pools[count.index]["name"]], ) content { effect = taint.value.effect @@ -269,10 +271,10 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -287,8 +289,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -320,6 +322,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + } timeouts { diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index 9668b6f1ea..dbc346ca0c 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -132,6 +132,7 @@ locals { cluster_pod_security_policy_enabled = local.cluster_output_pod_security_policy_enabled cluster_intranode_visibility_enabled = local.cluster_output_intranode_visbility_enabled cluster_vertical_pod_autoscaling_enabled = local.cluster_output_vertical_pod_autoscaling_enabled + cluster_workload_identity_config = var.identity_namespace == "" ? [] : [{ identity_namespace = var.identity_namespace }] diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index a1057f1843..b875aea14f 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -183,6 +185,7 @@ variable "node_pools_taints" { type = map(list(object({ key = string, value = string, effect = string }))) description = "Map of lists containing node taints by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -193,6 +196,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -203,6 +207,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] @@ -328,6 +333,7 @@ variable "default_max_pods_per_node" { variable "database_encryption" { description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." type = list(object({ state = string, key_name = string })) + default = [{ state = "DECRYPTED" key_name = "" @@ -346,6 +352,7 @@ variable "enable_binary_authorization" { variable "pod_security_policy_config" { description = "enabled - Enable the PodSecurityPolicy controller for this cluster. 
If enabled, pods must be valid under a PodSecurityPolicy to be created." + default = [{ "enabled" = false }] diff --git a/modules/beta-public-cluster/variables_defaults.tf b/modules/beta-public-cluster/variables_defaults.tf new file mode 100644 index 0000000000..9a75764306 --- /dev/null +++ b/modules/beta-public-cluster/variables_defaults.tf @@ -0,0 +1,72 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + + node_pools_taints = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_taints + ) + + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index af422abb2d..eebb4cd2ab 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -159,10 +159,10 @@ resource "random_id" "name" { labels = join(",", sort( concat( - keys(var.node_pools_labels["all"]), - values(var.node_pools_labels["all"]), - keys(var.node_pools_labels[var.node_pools[count.index]["name"]]), - values(var.node_pools_labels[var.node_pools[count.index]["name"]]) + keys(local.node_pools_labels["all"]), + values(local.node_pools_labels["all"]), + keys(local.node_pools_labels[var.node_pools[count.index]["name"]]), + values(local.node_pools_labels[var.node_pools[count.index]["name"]]) ) ) ) @@ -171,10 +171,10 @@ resource "random_id" "name" { metadata = join(",", sort( concat( - keys(var.node_pools_metadata["all"]), - values(var.node_pools_metadata["all"]), - keys(var.node_pools_metadata[var.node_pools[count.index]["name"]]), - values(var.node_pools_metadata[var.node_pools[count.index]["name"]]) + keys(local.node_pools_metadata["all"]), + values(local.node_pools_metadata["all"]), + 
keys(local.node_pools_metadata[var.node_pools[count.index]["name"]]), + values(local.node_pools_metadata[var.node_pools[count.index]["name"]]) ) ) ) @@ -183,8 +183,8 @@ resource "random_id" "name" { oauth_scopes = join(",", sort( concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) ) ) @@ -193,8 +193,8 @@ resource "random_id" "name" { tags = join(",", sort( concat( - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]] + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]] ) ) ) @@ -208,7 +208,9 @@ resource "google_container_node_pool" "pools" { name = random_id.name.*.hex[count.index] project = var.project_id location = local.location - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( var.node_pools[count.index], "version", @@ -241,25 +243,25 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -274,8 +276,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -290,7 +292,8 @@ resource "google_container_node_pool" "pools" { } lifecycle { - ignore_changes = [initial_node_count] + ignore_changes = [initial_node_count] + create_before_destroy = true } diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 508a4f1b96..52ad93f973 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -183,6 +185,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -193,6 +196,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] diff --git a/modules/private-cluster-update-variant/variables_defaults.tf b/modules/private-cluster-update-variant/variables_defaults.tf new file mode 100644 index 0000000000..145685f074 --- /dev/null +++ b/modules/private-cluster-update-variant/variables_defaults.tf @@ -0,0 +1,62 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file was automatically generated from a template in ./autogen + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index 4567530763..94f3637919 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -136,7 +136,9 @@ resource "google_container_node_pool" "pools" { name = var.node_pools[count.index]["name"] project = var.project_id location = local.location - cluster = google_container_cluster.primary.name + + cluster = google_container_cluster.primary.name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( var.node_pools[count.index], "version", @@ -169,25 +171,25 @@ resource "google_container_node_pool" "pools" { image_type = lookup(var.node_pools[count.index], "image_type", "COS") machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") labels = merge( - lookup(lookup(var.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_labels["all"], - var.node_pools_labels[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_labels["all"], + local.node_pools_labels[var.node_pools[count.index]["name"]], ) metadata = merge( - lookup(lookup(var.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(var.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {}, - var.node_pools_metadata["all"], - var.node_pools_metadata[var.node_pools[count.index]["name"]], + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? 
{ "node_pool" = var.node_pools[count.index]["name"] } : {}, + local.node_pools_metadata["all"], + local.node_pools_metadata[var.node_pools[count.index]["name"]], { "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints }, ) tags = concat( - lookup(var.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], - lookup(var.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], - var.node_pools_tags["all"], - var.node_pools_tags[var.node_pools[count.index]["name"]], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [], + local.node_pools_tags["all"], + local.node_pools_tags[var.node_pools[count.index]["name"]], ) local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0) @@ -202,8 +204,8 @@ resource "google_container_node_pool" "pools" { preemptible = lookup(var.node_pools[count.index], "preemptible", false) oauth_scopes = concat( - var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + local.node_pools_oauth_scopes["all"], + local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) guest_accelerator = [ @@ -219,6 +221,7 @@ resource "google_container_node_pool" "pools" { lifecycle { ignore_changes = [initial_node_count] + } timeouts { diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 508a4f1b96..52ad93f973 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -183,6 +185,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -193,6 +196,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] diff --git a/modules/private-cluster/variables_defaults.tf b/modules/private-cluster/variables_defaults.tf new file mode 100644 index 0000000000..145685f074 --- /dev/null +++ b/modules/private-cluster/variables_defaults.tf @@ -0,0 +1,62 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} diff --git a/test/integration/deploy_service/controls/kubectl.rb b/test/integration/deploy_service/controls/kubectl.rb index 2d4a473d2c..67921b1298 100644 --- a/test/integration/deploy_service/controls/kubectl.rb +++ b/test/integration/deploy_service/controls/kubectl.rb @@ -58,6 +58,12 @@ it "is reachable" do expect { + 10.times do + unless host(service_load_balancer_ip, port: 8080, protocol: 'tcp').reachable? + puts "Nginx is not reachable, retrying.." + sleep 10 + end + end RestClient.get(service_load_balancer_address) }.to_not raise_exception end diff --git a/variables.tf b/variables.tf index 58cf1f4685..5b89c692ec 100644 --- a/variables.tf +++ b/variables.tf @@ -163,6 +163,7 @@ variable "node_pools_labels" { type = map(map(string)) description = "Map of maps containing node labels by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -173,6 +174,7 @@ variable "node_pools_metadata" { type = map(map(string)) description = "Map of maps containing node metadata by node-pool name" + # Default is being set in variables_defaults.tf default = { all = {} default-node-pool = {} @@ -183,6 +185,7 @@ variable "node_pools_tags" { type = map(list(string)) description = "Map of lists containing node network tags by node-pool name" + # Default is being set in variables_defaults.tf default = { all = [] default-node-pool = [] @@ -193,6 +196,7 @@ variable "node_pools_oauth_scopes" { type = map(list(string)) description = "Map of lists containing node oauth scopes by node-pool name" + # Default is being set in variables_defaults.tf default = { all = ["https://www.googleapis.com/auth/cloud-platform"] default-node-pool = [] diff --git a/variables_defaults.tf b/variables_defaults.tf new file mode 100644 index 0000000000..145685f074 --- /dev/null +++ b/variables_defaults.tf @@ -0,0 +1,62 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file was automatically generated from a template in ./autogen + +# Setup dynamic default values for variables which can't be setup using +# the standard terraform "variable default" functionality + +locals { + node_pools_labels = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_labels + ) + + node_pools_metadata = merge( + { all = {} }, + { default-node-pool = {} }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : {}] + ), + var.node_pools_metadata + ) + + node_pools_tags = merge( + { all = [] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_tags + ) + + node_pools_oauth_scopes = merge( + { all = ["https://www.googleapis.com/auth/cloud-platform"] }, + { default-node-pool = [] }, + zipmap( + [for node_pool in var.node_pools : node_pool["name"]], + [for node_pool in var.node_pools : []] + ), + var.node_pools_oauth_scopes + ) +} From 198c85b44799a3925b426cd0706124f5c9c3df01 Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Thu, 21 Nov 2019 12:26:00 +0800 Subject: [PATCH 18/20] Fixed the files and updated README. --- README.md | 1 - cluster.tf | 4 ---- main.tf | 2 -- modules/beta-private-cluster-update-variant/README.md | 1 - modules/beta-private-cluster/README.md | 1 - modules/beta-public-cluster/README.md | 1 - modules/private-cluster-update-variant/README.md | 1 - modules/private-cluster/README.md | 1 - modules/private-cluster/main.tf | 2 +- outputs.tf | 5 ----- 10 files changed, 1 insertion(+), 18 deletions(-) diff --git a/README.md b/README.md index 15f6aff13b..514563ca17 100644 --- a/README.md +++ b/README.md @@ -184,7 +184,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | | master\_authorized\_networks\_config | Networks from which access to master is permitted | diff --git a/cluster.tf b/cluster.tf index 072a60fb14..d518027963 100644 --- a/cluster.tf +++ b/cluster.tf @@ -79,10 +79,6 @@ resource "google_container_cluster" "primary" { disabled = ! var.horizontal_pod_autoscaling } - kubernetes_dashboard { - disabled = ! var.kubernetes_dashboard - } - network_policy_config { disabled = ! 
var.network_policy } diff --git a/main.tf b/main.tf index 1090227fd8..754fcefe04 100644 --- a/main.tf +++ b/main.tf @@ -80,7 +80,6 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - cluster_output_kubernetes_dashboard_enabled = google_container_cluster.primary.addons_config.0.kubernetes_dashboard.0.disabled cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) @@ -105,7 +104,6 @@ locals { cluster_network_policy_enabled = ! local.cluster_output_network_policy_enabled cluster_http_load_balancing_enabled = ! local.cluster_output_http_load_balancing_enabled cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_output_horizontal_pod_autoscaling_enabled - cluster_kubernetes_dashboard_enabled = ! local.cluster_output_kubernetes_dashboard_enabled } /****************************************** diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index d2b5197726..cc36e4dcd5 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -216,7 +216,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | identity\_namespace | Workload Identity namespace | | intranode\_visibility\_enabled | Whether intra-node visibility is enabled | | istio\_enabled | Whether Istio is enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | | master\_authorized\_networks\_config | Networks from which access to master is permitted | diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 96ad9abf5a..54fcb702a2 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -216,7 +216,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | identity\_namespace | Workload Identity namespace | | intranode\_visibility\_enabled | Whether intra-node visibility is enabled | | istio\_enabled | Whether Istio is enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | | master\_authorized\_networks\_config | Networks from which access to master is permitted | diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 7fcf78cccc..94d38c0946 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -207,7 +207,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | identity\_namespace | Workload Identity namespace | | intranode\_visibility\_enabled | Whether intra-node visibility is enabled | | istio\_enabled | Whether Istio is enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | | 
master\_authorized\_networks\_config | Networks from which access to master is permitted | diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index fa9cdb8852..c40e260a59 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -193,7 +193,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | | master\_authorized\_networks\_config | Networks from which access to master is permitted | diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 5465544b82..926c61a461 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -193,7 +193,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | endpoint | Cluster endpoint | | horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | | http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | | location | Cluster location (region if regional cluster, zone if zonal cluster) | | logging\_service | Logging service used | | master\_authorized\_networks\_config | Networks from which access to master is permitted | diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index 01af2436ef..7826dfff18 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -80,7 +80,7 @@ locals { cluster_output_network_policy_enabled = google_container_cluster.primary.addons_config.0.network_policy_config.0.disabled cluster_output_http_load_balancing_enabled = google_container_cluster.primary.addons_config.0.http_load_balancing.0.disabled cluster_output_horizontal_pod_autoscaling_enabled = google_container_cluster.primary.addons_config.0.horizontal_pod_autoscaling.0.disabled - + cluster_output_node_pools_names = concat(google_container_node_pool.pools.*.name, [""]) cluster_output_node_pools_versions = concat(google_container_node_pool.pools.*.version, [""]) diff --git a/outputs.tf b/outputs.tf index dea7b5c7b5..54080bfa21 100644 --- a/outputs.tf +++ b/outputs.tf @@ -103,11 +103,6 @@ output "horizontal_pod_autoscaling_enabled" { value = local.cluster_horizontal_pod_autoscaling_enabled } -output "kubernetes_dashboard_enabled" { - description = "Whether kubernetes dashboard enabled" - value = local.cluster_kubernetes_dashboard_enabled -} - output "node_pools_names" { description = "List of node pools names" value = local.cluster_node_pools_names From 2150112711892ea7068e76aa475da39f75a95cdc Mon Sep 17 00:00:00 2001 From: Shashindran Vijayan Date: Thu, 21 Nov 2019 13:37:30 +0800 Subject: [PATCH 19/20] Updated README and variables.tf --- README.md | 2 -- autogen/README.md | 1 - autogen/variables.tf.tmpl | 6 ------ modules/beta-private-cluster-update-variant/README.md | 2 -- modules/beta-private-cluster-update-variant/variables.tf | 6 ------ modules/beta-private-cluster/README.md | 2 -- modules/beta-private-cluster/variables.tf | 6 ------ modules/beta-public-cluster/README.md | 2 
-- modules/beta-public-cluster/variables.tf | 6 ------ modules/private-cluster-update-variant/README.md | 2 -- modules/private-cluster-update-variant/variables.tf | 6 ------ modules/private-cluster/README.md | 2 -- modules/private-cluster/variables.tf | 6 ------ variables.tf | 6 ------ 14 files changed, 55 deletions(-) diff --git a/README.md b/README.md index c7e0b264b4..60931a99af 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true node_pools = [ @@ -147,7 +146,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | diff --git a/autogen/README.md b/autogen/README.md index 4165c9b8d8..c8e956a76e 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -39,7 +39,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true {% if private_cluster %} enable_private_endpoint = true diff --git a/autogen/variables.tf.tmpl b/autogen/variables.tf.tmpl index ad5cc44e34..8d6c3e5985 100644 --- a/autogen/variables.tf.tmpl +++ b/autogen/variables.tf.tmpl @@ -96,12 +96,6 @@ variable "http_load_balancing" { default = true } -variable "kubernetes_dashboard" { - type = bool - description = "Enable kubernetes dashboard addon" - default = false -} - variable "network_policy" { type = bool description = "Enable network policy addon" diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 4490f9cee8..2f52021bd7 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -36,7 +36,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true enable_private_endpoint = true enable_private_nodes = true @@ -168,7 +167,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to 
authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | | istio | (Beta) Enable Istio addon | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index d04ed5ac1f..eb7c78f502 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -96,12 +96,6 @@ variable "http_load_balancing" { default = true } -variable "kubernetes_dashboard" { - type = bool - description = "Enable kubernetes dashboard addon" - default = false -} - variable "network_policy" { type = bool description = "Enable network policy addon" diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 37aa5b52b0..a60877eca3 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -36,7 +36,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true enable_private_endpoint = true enable_private_nodes = true @@ -168,7 +167,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | | istio | (Beta) Enable Istio addon | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index d04ed5ac1f..eb7c78f502 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -96,12 +96,6 @@ variable "http_load_balancing" { default = true } -variable "kubernetes_dashboard" { - type = bool - description = "Enable kubernetes dashboard addon" - default = false -} - variable "network_policy" { type = bool description = "Enable network policy addon" diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 3dd5b263d7..00da4429f2 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -34,7 +34,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true istio = true cloudrun = true @@ -160,7 +159,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | | istio | (Beta) Enable Istio addon | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index a1057f1843..e0c1d89db9 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -96,12 +96,6 @@ variable "http_load_balancing" { default = true } -variable "kubernetes_dashboard" { - type = bool - description = "Enable kubernetes dashboard addon" - default = false -} - variable "network_policy" { type = bool description = "Enable network policy addon" diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 26ccb3b08f..37f95ccf7d 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -36,7 +36,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true enable_private_endpoint = true enable_private_nodes = true @@ -155,7 +154,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 508a4f1b96..7c82afc753 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -96,12 +96,6 @@ variable "http_load_balancing" { default = true } -variable "kubernetes_dashboard" { - type = bool - description = "Enable kubernetes dashboard addon" - default = false -} - variable "network_policy" { type = bool description = "Enable network policy addon" diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index b3f16a1497..adbb3b8042 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -36,7 +36,6 @@ module "gke" { ip_range_services = "us-central1-01-gke-01-services" http_load_balancing = false horizontal_pod_autoscaling = true - kubernetes_dashboard = true network_policy = true enable_private_endpoint = true enable_private_nodes = true @@ -155,7 +154,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | | ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | | issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. 
Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no |
 | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no |
diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf
index 508a4f1b96..7c82afc753 100644
--- a/modules/private-cluster/variables.tf
+++ b/modules/private-cluster/variables.tf
@@ -96,12 +96,6 @@ variable "http_load_balancing" {
   default     = true
 }
 
-variable "kubernetes_dashboard" {
-  type        = bool
-  description = "Enable kubernetes dashboard addon"
-  default     = false
-}
-
 variable "network_policy" {
   type        = bool
   description = "Enable network policy addon"
diff --git a/variables.tf b/variables.tf
index 58cf1f4685..d50dd0150d 100644
--- a/variables.tf
+++ b/variables.tf
@@ -96,12 +96,6 @@ variable "http_load_balancing" {
   default     = true
 }
 
-variable "kubernetes_dashboard" {
-  type        = bool
-  description = "Enable kubernetes dashboard addon"
-  default     = false
-}
-
 variable "network_policy" {
   type        = bool
   description = "Enable network policy addon"

From ff73ef2437be1ef53ffba284fdfa0021f2dd5441 Mon Sep 17 00:00:00 2001
From: Morgante Pell
Date: Thu, 21 Nov 2019 22:43:16 +0000
Subject: [PATCH 20/20] Update CHANGELOG.md

---
 CHANGELOG.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 85f598f641..a849f89183 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,10 @@ Extending the adopted spec, each change should have a link to its corresponding
 * Support for `local_ssd_count` in node pool configuration. [#244]
 * Wait for cluster to be ready before returning endpoint. [#340]
 
+### Removed
+
+* **Breaking**: Removed support for enabling the Kubernetes dashboard, as this is deprecated on GKE. [#337]
+
 ## [v5.1.1] - 2019-10-25
 
 ### Fixed
@@ -230,6 +234,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o
 [v0.3.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.2.0...v0.3.0
 [v0.2.0]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/compare/v0.1.0...v0.2.0
 
+[#337]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/337
 [#340]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/pull/340
 [#268]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/268
 [#311]: https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/311