diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..2ad2fdfd88 --- /dev/null +++ b/.gitignore @@ -0,0 +1,49 @@ +# OSX leaves these everywhere on SMB shares +._* + +# OSX trash +.DS_Store + +# Python +*.pyc + +# Emacs save files +*~ +\#*\# +.\#* + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +### https://raw.github.com/github/gitignore/90f149de451a5433aebd94d02d11b0e28843a1af/Terraform.gitignore + +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log + +# Kitchen files +**/inspec.lock +**.gem +**/.kitchen +**/.kitchen.local.yml +**/Gemfile.lock + +# Ignore any .tfvars files that are generated automatically for each Terraform run. Most +# .tfvars files are managed as part of configuration and so should be included in +# version control. +# +# example.tfvars +**/terraform.tfvars + +test/integration/gcloud/config.sh +test/integration/tmp diff --git a/LICENSE b/LICENSE index 261eeb9e9f..d645695673 100644 --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..de5a344569 --- /dev/null +++ b/Makefile @@ -0,0 +1,82 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
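+
+# Quick usage (see the README's Linting section): `make -s` runs every check
+# via the `all` target below; `make -s <target_name>` runs a single check.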
+ +# Make will use bash instead of sh +SHELL := /usr/bin/env bash + +# All is the first target in the file so it will get picked up when you just run 'make' on its own +all: check_shell check_python check_golang check_terraform check_docker check_base_files test_check_headers check_headers check_trailing_whitespace generate_docs + +# The .PHONY directive tells make that this isn't a real target and so +# the presence of a file named 'check_shell' won't cause this target to stop +# working +.PHONY: check_shell +check_shell: + @source test/make.sh && check_shell + +.PHONY: check_python +check_python: + @source test/make.sh && check_python + +.PHONY: check_golang +check_golang: + @source test/make.sh && golang + +.PHONY: check_terraform +check_terraform: + @source test/make.sh && check_terraform + +.PHONY: check_docker +check_docker: + @source test/make.sh && docker + +.PHONY: check_base_files +check_base_files: + @source test/make.sh && basefiles + +.PHONY: check_shebangs +check_shebangs: + @source test/make.sh && check_bash + +.PHONY: check_trailing_whitespace +check_trailing_whitespace: + @source test/make.sh && check_trailing_whitespace + +.PHONY: test_check_headers +test_check_headers: + @echo "Testing the validity of the header check" + @python test/test_verify_boilerplate.py + +.PHONY: check_headers +check_headers: + @echo "Checking file headers" + @python test/verify_boilerplate.py + +.PHONY: generate_docs +generate_docs: + @source test/make.sh && generate_docs + +# Integration tests + +.PHONY: regional_test_integration +regional_test_integration: + ./test/integration/gcloud/run.sh regional + +.PHONY: zonal_test_integration +zonal_test_integration: + ./test/integration/gcloud/run.sh zonal + +.PHONY: test_integration +test_integration: regional_test_integration zonal_test_integration + @echo "Running tests for regional and zonal clusters" diff --git a/README.md b/README.md index c6e93b2aa1..c1170a9848 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,254 @@ -# terraform-google-kubernetes-engine -A Cloud Foundation Toolkit Module: Opinionated Google Cloud Platform project creation and configuration with Shared VPC, IAM, APIs, etc. +# Terraform Kubernetes Engine Module + +This module handles opinionated Google Cloud Platform Kubernetes Engine cluster creation and configuration with Node Pools, IP MASQ, Network Policy, etc. 
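+
+As a quick orientation, a minimal invocation sketch is shown below; it sets only the inputs marked required in the Inputs table and uses placeholder values throughout, so treat it as illustrative rather than a tested configuration (the full, opinionated example is under Usage):
+
+```hcl
+module "gke" {
+  source            = "github.com/terraform-google-modules/terraform-google-kubernetes-engine"
+  project_id        = "my-project-id"            # placeholder
+  name              = "example-cluster"          # placeholder
+  region            = "us-central1"
+  network           = "vpc-01"                   # existing VPC network name
+  subnetwork        = "us-central1-01"           # existing subnetwork in that network
+  ip_range_pods     = "pods-secondary-range"     # placeholder secondary range names
+  ip_range_services = "services-secondary-range"
+}
+```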
+ +## Requirements +### Kubectl +- [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x +### Terraform plugins +- [Terraform](https://www.terraform.io/downloads.html) 0.10.x +- [terraform-provider-google](https://github.com/terraform-providers/terraform-provider-google) plugin v1.8.0 + +### Configure a Service Account +In order to execute this module you must have a Service Account with the following: + +#### Roles +The service account must have the following roles: +- roles/compute.viewer on the project +- roles/container.clusterAdmin on the project + +### Enable APIs +In order to operate with the Service Account you must activate the following APIs on the project where the Service Account was created: + +- Compute Engine API - compute.googleapis.com +- Kubernetes Engine API - container.googleapis.com + +## Install + +### Terraform +Be sure you have the correct Terraform version (0.10.x). You can download the binary here: +- https://releases.hashicorp.com/terraform/ + +## Usage +There are multiple examples included in the [examples](./examples/) folder, but simple usage is as follows: + +```hcl +module "gke" { + source = "github.com/terraform-google-modules/terraform-google-kubernetes-engine" + project_id = "" + name = "gke-test-1" + region = "us-central1" + zones = ["us-central1-a", "us-central1-b", "us-central1-f"] + network = "vpc-01" + subnetwork = "us-central1-01" + ip_range_pods = "us-central1-01-gke-01-pods" + ip_range_services = "us-central1-01-gke-01-services" + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + + node_pools = [ + { + name = "default-node-pool" + machine_type = "n1-standard-2" + min_count = 1 + max_count = 100 + disk_size_gb = 100 + disk_type = "pd-standard" + image_type = "COS" + auto_repair = true + auto_upgrade = true + service_account = "project-service-account@.iam.gserviceaccount.com" + }, + ] + + node_pools_labels = { + all = {} + + default-node-pool = { + default-node-pool = "true" + } + } + + node_pools_taints = { + all = [] + + default-node-pool = [ + { + key = "default-node-pool" + value = "true" + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + + node_pools_tags = { + all = [] + + default-node-pool = [ + "default-node-pool", + ] + } +} +``` + +Then perform the following commands on the root folder: + +- `terraform init` to get the plugins +- `terraform plan` to see the infrastructure plan +- `terraform apply` to apply the infrastructure build +- `terraform destroy` to destroy the built infrastructure +[^]: (autogen_docs_start) + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| description | The description of the cluster | string | `` | no | +| horizontal_pod_autoscaling | Enable horizontal pod autoscaling addon | string | `false` | no | +| http_load_balancing | Enable HTTP load balancing addon | string | `true` | no | +| ip_masq_link_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | string | `false` | no | +| ip_masq_resync_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `60s` | no | +| ip_range_pods | The secondary ip range to use for pods | string | - | yes | +| ip_range_services | The secondary ip range to use for services | string | - | yes | +| kubernetes_dashboard | Enable kubernetes dashboard addon | string | `false` | no | +| kubernetes_version | The Kubernetes version of the masters.
If set to 'latest' it will pull the latest available version in the selected region. | string | `1.10.6-gke.2` | no | +| maintenance_start_time | Time window specified for daily maintenance operations in RFC3339 format | string | `05:00` | no | +| name | The name of the cluster (required) | string | - | yes | +| network | The VPC network to host the cluster in (required) | string | - | yes | +| network_policy | Enable network policy addon | string | `false` | no | +| network_project_id | The project ID of the shared VPC's host (for shared vpc support) | string | `` | no | +| node_pools | List of maps containing node pools | list | `` | no | +| node_pools_labels | Map of maps containing node labels by node-pool name | map | `` | no | +| node_pools_tags | Map of lists containing node network tags by node-pool name | map | `` | no | +| node_pools_taints | Map of lists containing node taints by node-pool name | map | `` | no | +| node_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set to the same version as the master at cluster creation. | string | `` | no | +| non_masquerade_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no | +| project_id | The project ID to host the cluster in (required) | string | - | yes | +| region | The region to host the cluster in (required) | string | - | yes | +| regional | Whether this is a regional cluster (zonal cluster if set to false. WARNING: changing this after cluster creation is destructive!) | string | `true` | no | +| stub_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | - | yes | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca_certificate | Cluster ca certificate (base64 encoded) | +| endpoint | Cluster endpoint | +| horizontal_pod_autoscaling_enabled | Whether horizontal pod autoscaling is enabled | +| http_load_balancing_enabled | Whether http load balancing is enabled | +| kubernetes_dashboard_enabled | Whether the kubernetes dashboard is enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| master_version | Current master kubernetes version | +| min_master_version | Minimum master kubernetes version | +| name | Cluster name | +| network_policy_enabled | Whether network policy is enabled | +| node_pools_names | List of node pool names | +| node_pools_versions | List of node pool versions | +| region | Cluster region | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + +[^]: (autogen_docs_end) + +## Infrastructure +The resources/services/activations/deletions that this module will create/trigger are: +- Create a GKE cluster with the provided addons +- Create GKE Node Pool(s) with the provided configuration and attach them to the cluster +- Replace the default kube-dns configmap if `stub_domains` are provided +- Activate network policy if `network_policy` is true +- Add the `ip-masq-agent` configmap with the provided `non_masquerade_cidrs` if `network_policy` is true + +## File structure +The project has the following folders and files: + +- /: root folder +- /examples: examples for using this
module +- /scripts: Scripts for specific tasks on the module (see the Infrastructure section of this file) +- /test: Folders with files for testing the module (see the Testing section of this file) +- /main.tf: main file for this module, contains all the resources to create +- /variables.tf: all the variables for the module +- /output.tf: the outputs of the module +- /README.md: this file + +## Testing + +### Requirements +- [bundler](https://github.com/bundler/bundler) +- [gcloud](https://cloud.google.com/sdk/install) +- [jq](https://stedolan.github.io/jq/) 1.5 +- [terraform-docs](https://github.com/segmentio/terraform-docs/releases) 0.3.0 + +### Autogeneration of documentation from .tf files +Run +``` +make generate_docs +``` + +### Integration test +#### Terraform integration tests +The integration tests for this module leverage [kitchen-terraform](https://github.com/newcontext-oss/kitchen-terraform) and [kitchen-inspec](https://github.com/inspec/kitchen-inspec). + +The tests will do the following: +- Perform `bundle install` command + - Installs `kitchen-terraform` and `kitchen-inspec` gems +- Perform `kitchen create` command + - Performs a `terraform init` +- Perform `kitchen converge` command + - Performs a `terraform apply -auto-approve` +- Perform `kitchen verify` command + - Performs inspec tests. + - Shell out to `gcloud` to validate expected resources in GCP. + - Shell out to `kubectl` to validate expected resources in Kubernetes. + - Shell out to `terraform` to validate outputs. +- Perform `kitchen destroy` command + - Performs a `terraform destroy -force` + +You can use the following command to run the integration test from the root folder: + + `make test_integration` + +### Linting +The makefile in this project will lint or sometimes just format any shell, +Python, golang, Terraform, or Dockerfiles. The linters will only be run if +the makefile finds files with the appropriate file extension. + +All of the linter checks are in the default make target, so you just have to +run + +``` +make -s +``` + +The -s is for 'silent'. Successful output looks like this: + +``` +Running shellcheck +Running flake8 +Running go fmt and go vet +Running terraform validate +Running hadolint on Dockerfiles +Checking for required files +Testing the validity of the header check +.. +---------------------------------------------------------------------- +Ran 2 tests in 0.026s + +OK +Checking file headers +The following lines have trailing whitespace +``` + +The linters +are as follows: +* Shell - shellcheck. Can be found in homebrew +* Python - flake8. Can be installed with 'pip install flake8' +* Golang - gofmt. gofmt comes with the standard golang installation. golang +is a compiled language so there is no standard linter. +* Terraform - terraform has a built-in linter in the 'terraform validate' +command. +* Dockerfiles - hadolint. Can be found in homebrew \ No newline at end of file diff --git a/auth.tf b/auth.tf new file mode 100644 index 0000000000..4a664d99f5 --- /dev/null +++ b/auth.tf @@ -0,0 +1,30 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/****************************************** + Retrieve authentication token + *****************************************/ +data "google_client_config" "default" {} + +/****************************************** + Configure provider + *****************************************/ +provider "kubernetes" { + load_config_file = false + host = "https://${local.cluster_endpoint}" + token = "${data.google_client_config.default.access_token}" + cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" +} diff --git a/cluster_regional.tf b/cluster_regional.tf new file mode 100644 index 0000000000..7db0002ce3 --- /dev/null +++ b/cluster_regional.tf @@ -0,0 +1,130 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/****************************************** + Create regional cluster + *****************************************/ +resource "google_container_cluster" "primary" { + count = "${var.regional ? 1 : 0}" + name = "${var.name}" + description = "${var.description}" + project = "${var.project_id}" + + region = "${var.region}" + additional_zones = "${var.zones}" + + network = "projects/${local.network_project_id}/global/networks/${var.network}" + subnetwork = "projects/${local.network_project_id}/regions/${var.region}/subnetworks/${var.subnetwork}" + min_master_version = "${local.kubernetes_version}" + + addons_config { + http_load_balancing { + disabled = "${var.http_load_balancing ? 0 : 1}" + } + + horizontal_pod_autoscaling { + disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + } + + kubernetes_dashboard { + disabled = "${var.kubernetes_dashboard ? 0 : 1}" + } + + network_policy_config { + disabled = "${var.network_policy ? 0 : 1}" + } + } + + ip_allocation_policy { + cluster_secondary_range_name = "${var.ip_range_pods}" + services_secondary_range_name = "${var.ip_range_services}" + } + + maintenance_policy { + daily_maintenance_window { + start_time = "${var.maintenance_start_time}" + } + } + + lifecycle { + ignore_changes = ["node_pool"] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + + node_config { + service_account = "${lookup(var.node_pools[0], "service_account", "")}" + } + } +} + +/****************************************** + Create regional node pools + *****************************************/ +resource "google_container_node_pool" "pools" { + count = "${var.regional ? length(var.node_pools) : 0}" + name = "${lookup(var.node_pools[count.index], "name")}" + project = "${var.project_id}" + region = "${var.region}" + cluster = "${var.name}" + version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup(var.node_pools[count.index], "version", local.node_version)}" + initial_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" + + autoscaling { + min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" + max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + } + + management { + auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" + auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + } + + node_config { + image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" + machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" + labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" + taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" + tags = "${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}" + + disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" + disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" + service_account = "${lookup(var.node_pools[count.index], "service_account", "")}" + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + lifecycle { + ignore_changes = ["initial_node_count"] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + depends_on = ["google_container_cluster.primary"] +} diff --git a/cluster_zonal.tf b/cluster_zonal.tf new file mode 100644 index 0000000000..8fe2df3cdc --- /dev/null +++ b/cluster_zonal.tf @@ -0,0 +1,130 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/****************************************** + Create zonal cluster + *****************************************/ +resource "google_container_cluster" "zonal_primary" { + count = "${var.regional ? 0 : 1}" + name = "${var.name}" + description = "${var.description}" + project = "${var.project_id}" + + zone = "${var.zones[0]}" + additional_zones = "${slice(var.zones,1,length(var.zones))}" + + network = "projects/${local.network_project_id}/global/networks/${var.network}" + subnetwork = "projects/${local.network_project_id}/regions/${var.region}/subnetworks/${var.subnetwork}" + min_master_version = "${local.kubernetes_version}" + + addons_config { + http_load_balancing { + disabled = "${var.http_load_balancing ? 0 : 1}" + } + + horizontal_pod_autoscaling { + disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + } + + kubernetes_dashboard { + disabled = "${var.kubernetes_dashboard ? 0 : 1}" + } + + network_policy_config { + disabled = "${var.network_policy ? 
0 : 1}" + } + } + + ip_allocation_policy { + cluster_secondary_range_name = "${var.ip_range_pods}" + services_secondary_range_name = "${var.ip_range_services}" + } + + maintenance_policy { + daily_maintenance_window { + start_time = "${var.maintenance_start_time}" + } + } + + lifecycle { + ignore_changes = ["node_pool"] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + node_pool { + name = "default-pool" + + node_config { + service_account = "${lookup(var.node_pools[0], "service_account", "")}" + } + } +} + +/****************************************** + Create zonal node pools + *****************************************/ +resource "google_container_node_pool" "zonal_pools" { + count = "${var.regional ? 0 : length(var.node_pools)}" + name = "${lookup(var.node_pools[count.index], "name")}" + project = "${var.project_id}" + zone = "${var.zones[0]}" + cluster = "${var.name}" + version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version)}" + initial_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" + + autoscaling { + min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" + max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + } + + management { + auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" + auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + } + + node_config { + image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" + machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" + labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" + taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" + tags = "${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}" + + disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" + disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" + service_account = "${lookup(var.node_pools[count.index], "service_account", "")}" + + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + + lifecycle { + ignore_changes = ["initial_node_count"] + } + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + depends_on = ["google_container_cluster.zonal_primary"] +} diff --git a/dns.tf b/dns.tf new file mode 100644 index 0000000000..cec3ad1c34 --- /dev/null +++ b/dns.tf @@ -0,0 +1,52 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/****************************************** + Delete default kube-dns configmap + *****************************************/ +resource "null_resource" "delete_default_kube_dns_configmap" { + count = "${local.custom_kube_dns_config ? 1 : 0}" + + provisioner "local-exec" { + command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + } + + depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] +} + +/****************************************** + Create kube-dns confimap + *****************************************/ +resource "kubernetes_config_map" "kube-dns" { + count = "${local.custom_kube_dns_config ? 1 : 0}" + + metadata { + name = "kube-dns" + namespace = "kube-system" + + labels { + maintained_by = "terraform" + } + } + + data { + stubDomains = <&2 echo "3 arguments expected. Exiting." + exit 1 +fi + +RESOURCE_NAMESPACE=$1 +RESOURCE_TYPE=$2 +RESOURCE_NAME=$3 + +RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exit 1) + +# Delete requested resource +if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then + echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" + else + echo "Deleting default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" + kubectl -n "${RESOURCE_NAMESPACE}" delete "${RESOURCE_TYPE}" "${RESOURCE_NAME}" + fi +else + echo "No default ${RESOURCE_NAME} ${RESOURCE_TYPE} found in ${RESOURCE_NAMESPACE} namespace" +fi diff --git a/scripts/kubectl_wrapper.sh b/scripts/kubectl_wrapper.sh new file mode 100755 index 0000000000..a2054a7009 --- /dev/null +++ b/scripts/kubectl_wrapper.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +if [ "$#" -lt 3 ]; then + >&2 echo "Not all expected arguments set." 
+ exit 1 +fi + +HOST=$1 +TOKEN=$2 +CA_CERTIFICATE=$3 + +shift 3 + +RANDOM_ID="${RANDOM}_${RANDOM}" +export TMPDIR="/tmp/kubectl_wrapper_${RANDOM_ID}" + +function cleanup { + rm -rf "${TMPDIR}" +} +trap cleanup EXIT + +mkdir "${TMPDIR}" + +export KUBECONFIG="${TMPDIR}/config" + +echo "${CA_CERTIFICATE}" | base64 --decode > "${TMPDIR}/ca_certificate" + +kubectl config set-cluster kubectl-wrapper --server="${HOST}" --certificate-authority="${TMPDIR}/ca_certificate" --embed-certs=true 1>/dev/null +rm -f "${TMPDIR}/ca_certificate" +kubectl config set-context kubectl-wrapper --cluster=kubectl-wrapper --user=kubectl-wrapper --namespace=default 1>/dev/null +kubectl config set-credentials kubectl-wrapper --token="${TOKEN}" 1>/dev/null +kubectl config use-context kubectl-wrapper 1>/dev/null +kubectl version 1>/dev/null + +"$@" diff --git a/test/boilerplate/boilerplate.Dockerfile.txt b/test/boilerplate/boilerplate.Dockerfile.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.Dockerfile.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.Makefile.txt b/test/boilerplate/boilerplate.Makefile.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.Makefile.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.go.txt b/test/boilerplate/boilerplate.go.txt new file mode 100644 index 0000000000..557e16f064 --- /dev/null +++ b/test/boilerplate/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ diff --git a/test/boilerplate/boilerplate.py.txt b/test/boilerplate/boilerplate.py.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.py.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.sh.txt b/test/boilerplate/boilerplate.sh.txt new file mode 100644 index 0000000000..2e94f3e551 --- /dev/null +++ b/test/boilerplate/boilerplate.sh.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/boilerplate/boilerplate.tf.txt b/test/boilerplate/boilerplate.tf.txt new file mode 100644 index 0000000000..cfccff84ca --- /dev/null +++ b/test/boilerplate/boilerplate.tf.txt @@ -0,0 +1,15 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ diff --git a/test/boilerplate/boilerplate.xml.txt b/test/boilerplate/boilerplate.xml.txt new file mode 100644 index 0000000000..3d98cdc6e5 --- /dev/null +++ b/test/boilerplate/boilerplate.xml.txt @@ -0,0 +1,15 @@ +<!-- + Copyright 2018 Google LLC + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> diff --git a/test/boilerplate/boilerplate.yaml.txt b/test/boilerplate/boilerplate.yaml.txt new file mode 100644 index 0000000000..b0c7da3d77 --- /dev/null +++ b/test/boilerplate/boilerplate.yaml.txt @@ -0,0 +1,13 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
diff --git a/test/integration/gcloud/.gitignore b/test/integration/gcloud/.gitignore new file mode 100644 index 0000000000..cdd7c19c8d --- /dev/null +++ b/test/integration/gcloud/.gitignore @@ -0,0 +1 @@ +config.sh diff --git a/test/integration/gcloud/.kitchen.yml b/test/integration/gcloud/.kitchen.yml new file mode 100644 index 0000000000..65885a1b7b --- /dev/null +++ b/test/integration/gcloud/.kitchen.yml @@ -0,0 +1,33 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +driver: + name: "terraform" + command_timeout: 1800 + +provisioner: + name: "terraform" + +transport: + name: exec + +platforms: + - name: local + +verifier: + name: inspec + +suites: + - name: "default" diff --git a/test/integration/gcloud/Gemfile b/test/integration/gcloud/Gemfile new file mode 100644 index 0000000000..d7139b76dc --- /dev/null +++ b/test/integration/gcloud/Gemfile @@ -0,0 +1,20 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ruby '2.4.2' + +source 'https://rubygems.org/' do + gem 'kitchen-terraform', '~> 3.3' + gem 'kitchen-inspec', :git => 'https://github.com/inspec/kitchen-inspec.git', :ref => '0590f1b' +end diff --git a/test/integration/gcloud/run.sh b/test/integration/gcloud/run.sh new file mode 100755 index 0000000000..88ae698e35 --- /dev/null +++ b/test/integration/gcloud/run.sh @@ -0,0 +1,335 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
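+
+# Usage sketch (assumption: this script is normally driven by the Makefile
+# targets, which pass the cluster type):
+#   make regional_test_integration  ->  ./test/integration/gcloud/run.sh regional
+#   make zonal_test_integration    ->  ./test/integration/gcloud/run.sh zonal
+# A config.sh exporting the variables documented in sample.sh must sit next to
+# this script; it is gitignored, so it is created per environment.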
+ +if [ "$#" -lt 1 ]; then + >&2 echo "Must specify cluster type (regional/zonal)" + exit 1 +fi + +export CLUSTER_TYPE="$1" + +TEMPDIR=$(pwd)/test/integration/tmp +TESTDIR=${BASH_SOURCE%/*} + +function export_vars() { + export TEST_ID="modules_gke_integration_gcloud_${RANDOM}" + export KUBECONFIG="${TEMPDIR}/${CLUSTER_TYPE}/${TEST_ID}.kubeconfig" + if [[ $CLUSTER_TYPE = "regional" ]]; then + export CLUSTER_REGIONAL="true" + export CLUSTER_LOCATION="$REGIONAL_LOCATION" + export CLUSTER_NAME="$REGIONAL_CLUSTER_NAME" + export IP_RANGE_PODS="$REGIONAL_IP_RANGE_PODS" + export IP_RANGE_SERVICES="$REGIONAL_IP_RANGE_SERVICES" + else + export CLUSTER_REGIONAL="false" + export CLUSTER_LOCATION="$ZONAL_LOCATION" + export CLUSTER_NAME="$ZONAL_CLUSTER_NAME" + export IP_RANGE_PODS="$ZONAL_IP_RANGE_PODS" + export IP_RANGE_SERVICES="$ZONAL_IP_RANGE_SERVICES" + fi +} + +# Activate test working directory +function make_testdir() { + mkdir -p "${TEMPDIR}/${CLUSTER_TYPE}" + cp -r "${TESTDIR}"/* "${TEMPDIR}/${CLUSTER_TYPE}/" + cp -r "$TESTDIR"/.kitchen.yml "${TEMPDIR}/${CLUSTER_TYPE}/" +} + +# Activate test config +function activate_config() { + # shellcheck disable=SC1091 + source config.sh + echo "$PROJECT_NAME" +} + +# Cleans the workdir +function clean_workdir() { + #rm -rf "$TEMPDIR" + + export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE="" + unset CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE + +} + +# Creates the main.tf file for Terraform +function create_main_tf_file() { + echo "Creating main.tf file" + cat < main.tf +locals { + credentials_file_path = "$CREDENTIALS_PATH" +} + +provider "google" { + credentials = "\${file(local.credentials_file_path)}" +} + +provider "kubernetes" { + load_config_file = false + host = "https://\${module.gke.endpoint}" + token = "\${data.google_client_config.default.access_token}" + cluster_ca_certificate = "\${base64decode(module.gke.ca_certificate)}" +} + +data "google_client_config" "default" {} + +module "gke" { + source = "../../../../" + project_id = "$PROJECT_ID" + name = "$CLUSTER_NAME" + description = "Test GKE cluster" + regional = $CLUSTER_REGIONAL + region = "$REGION" + zones = [$ZONES] + kubernetes_version = "$KUBERNETES_VERSION" + network = "$NETWORK" + subnetwork = "$SUBNETWORK" + ip_range_pods = "$IP_RANGE_PODS" + ip_range_services = "$IP_RANGE_SERVICES" + + http_load_balancing = false + horizontal_pod_autoscaling = true + kubernetes_dashboard = true + network_policy = true + + stub_domains { + "example.com" = [ + "10.254.154.11", + "10.254.154.12", + ] + + "testola.com" = [ + "10.254.154.11", + "10.254.154.12", + ] + } + + non_masquerade_cidrs = [ + "10.0.0.0/8", + "192.168.20.0/24", + "192.168.21.0/24", + ] + + node_pools = [ + { + name = "pool-01" + machine_type = "n1-standard-1" + image_type = "COS" + initial_node_count = 2 + min_count = 1 + max_count = 2 + auto_upgrade = false + disk_size_gb = 30 + disk_type = "pd-standard" + service_account = "$NODE_POOL_SERVICE_ACCOUNT" + }, + ] + node_pools_labels = { + all = { + all_pools_label = "something" + } + + pool-01 = { + pool_01_label = "yes" + pool_01_another_label = "no" + } + } + node_pools_taints = { + all = [ + { + key = "all_pools_taint" + value = "true" + effect = "PREFER_NO_SCHEDULE" + }, + ] + + pool-01 = [ + { + key = "pool_01_taint" + value = "true" + effect = "PREFER_NO_SCHEDULE" + }, + { + key = "pool_01_another_taint" + value = "true" + effect = "PREFER_NO_SCHEDULE" + }, + ] + } + node_pools_tags = { + all = [ + "all-node-network-tag", + ] + + pool-01 = [ + "pool-01-network-tag", + ] + } +} + 
+resource "kubernetes_pod" "nginx-example" { + metadata { + name = "nginx-example" + + labels { + maintained_by = "terraform" + app = "nginx-example" + } + } + + spec { + container { + image = "nginx:1.7.9" + name = "nginx-example" + } + } + + depends_on = ["module.gke"] +} + +resource "kubernetes_service" "nginx-example" { + metadata { + name = "terraform-example" + } + + spec { + selector { + app = "\${kubernetes_pod.nginx-example.metadata.0.labels.app}" + } + + session_affinity = "ClientIP" + + port { + port = 8080 + target_port = 80 + } + + type = "LoadBalancer" + } + + depends_on = ["module.gke"] +} + +EOF +} + +# Creates the outputs.tf file +function create_outputs_file() { + echo "Creating outputs.tf file" + cat <<'EOF' > outputs.tf +output "name_example" { + value = "${module.gke.name}" +} + +output "type_example" { + value = "${module.gke.type}" +} + +output "location_example" { + value = "${module.gke.location}" +} + +output "region_example" { + value = "${module.gke.region}" +} + +output "zones_example" { + value = "${module.gke.zones}" +} + +output "endpoint_example" { + sensitive = true + value = "${module.gke.endpoint}" +} + +output "ca_certificate_example" { + sensitive = true + value = "${module.gke.ca_certificate}" +} + +output "min_master_version_example" { + value = "${module.gke.min_master_version}" +} + +output "master_version_example" { + value = "${module.gke.master_version}" +} + +output "network_policy_example" { + value = "${module.gke.network_policy_enabled}" +} + +output "http_load_balancing_example" { + value = "${module.gke.http_load_balancing_enabled}" +} + +output "horizontal_pod_autoscaling_example" { + value = "${module.gke.horizontal_pod_autoscaling_enabled}" +} + +output "kubernetes_dashboard_example" { + value = "${module.gke.kubernetes_dashboard_enabled}" +} + +output "node_pools_names_example" { + value = "${module.gke.node_pools_names}" +} + +output "node_pools_versions_example" { + value = "${module.gke.node_pools_versions}" +} + +# For use in integration tests +output "module_path" { + value = "${path.module}/../../../../" +} + +output "client_token" { + sensitive = true + value = "${base64encode(data.google_client_config.default.access_token)}" +} + +EOF +} + +# Install gems +function bundle_install() { + bundle install +} + +# Execute kitchen tests +function run_kitchen() { + kitchen create + kitchen converge + kitchen converge # second time to enable network policy + kitchen verify + kitchen destroy +} + +# Preparing environment +make_testdir + +cd "${TEMPDIR}/${CLUSTER_TYPE}/" || exit +activate_config +export_vars zonal +create_main_tf_file +create_outputs_file +bundle_install +run_kitchen + +# # # Clean the environment +cd - || exit +clean_workdir +echo "Integration test finished" diff --git a/test/integration/gcloud/sample.sh b/test/integration/gcloud/sample.sh new file mode 100644 index 0000000000..d357bf8d32 --- /dev/null +++ b/test/integration/gcloud/sample.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +################################################################# +# PLEASE FILL THE VARIABLES WITH VALID VALUES FOR TESTING # +# DO NOT REMOVE ANY OF THE VARIABLES # +################################################################# + +## These values you *MUST* modify to match your environment +export PROJECT_ID="gke-test-integration" +export CREDENTIALS_PATH="$HOME/sa-key.json" +export NETWORK="vpc-01" +export SUBNETWORK="us-east4-01" +export REGIONAL_IP_RANGE_PODS="us-east4-01-gke-01-pod" +export REGIONAL_IP_RANGE_SERVICES="us-east4-01-gke-01-service" +export ZONAL_IP_RANGE_PODS="us-east4-01-gke-02-pod" +export ZONAL_IP_RANGE_SERVICES="us-east4-01-gke-02-service" + +## These values you can potentially leave at the defaults +export CLUSTER_NAME="int-test-cluster-01" +export REGION="us-east4" +export ZONE="us-east4-a" +export ADDITIONAL_ZONES='"us-east4-b","us-east4-c"' +export ZONES="\"$ZONE\",$ADDITIONAL_ZONES" +export KUBERNETES_VERSION="1.10.6-gke.2" +export NODE_POOL_SERVICE_ACCOUNT="" +export REGIONAL_CLUSTER_NAME="int-test-regional-01" +export REGIONAL_LOCATION="$REGION" +export ZONAL_CLUSTER_NAME="int-test-zonal-01" +export ZONAL_LOCATION="$ZONE" +export CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=$CREDENTIALS_PATH diff --git a/test/integration/gcloud/test/integration/default/inspec/gcloud.rb b/test/integration/gcloud/test/integration/default/inspec/gcloud.rb new file mode 100644 index 0000000000..b88743cb7c --- /dev/null +++ b/test/integration/gcloud/test/integration/default/inspec/gcloud.rb @@ -0,0 +1,127 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
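+
+# Note: the ${...} tokens in the single-quoted command strings below are not
+# Ruby interpolation; they reach the shell unexpanded and resolve there to the
+# environment variables exported by run.sh (PROJECT_ID, CLUSTER_LOCATION,
+# CLUSTER_NAME, and friends).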
+ +# Test the cluster is in running status +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.status\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq 'RUNNING' } +end + +# Test the cluster has the expected initial cluster version +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.initialClusterVersion\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq ENV['KUBERNETES_VERSION'] } +end + +# Test the cluster is in the expected network +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.network\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq ENV['NETWORK'] } +end + +# Test the cluster is in the expected subnetwork +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.subnetwork\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq ENV['SUBNETWORK'] } +end + +# Test the cluster has the expected secondary ip range for pods +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.ipAllocationPolicy.clusterSecondaryRangeName\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq ENV['IP_RANGE_PODS'] } +end + +# Test the cluster has the expected secondary ip range for services +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.ipAllocationPolicy.servicesSecondaryRangeName\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq ENV['IP_RANGE_SERVICES'] } +end + +# Test the cluster has the expected addon settings +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.addonsConfig\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '{"horizontalPodAutoscaling":{},"httpLoadBalancing":{"disabled":true},"kubernetesDashboard":{},"networkPolicyConfig":{}}' } +end + +# Test default pool has no initial node count +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "default-pool") | .initialNodeCount\'') do + its('exit_status') { should eq 1 } + its('stdout.strip') { should eq 'null' } +end + +# Test default pool does not have autoscaling enabled +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "default-pool") | .autoscaling.enabled\'') do + its('exit_status') { should eq 1 } + its('stdout.strip') { should eq 'null' } +end + +# Test pool-01 is expected version +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .version\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq ENV['KUBERNETES_VERSION'] } +end + +# Test pool-01 has auto scaling enabled +describe command('gcloud
--project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .autoscaling.enabled\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq 'true' } +end + +# Test pool-01 has expected min node count +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .autoscaling.minNodeCount\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '1' } +end + +# Test pool-01 has expected max node count +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .autoscaling.maxNodeCount\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '2' } +end + +# Test pool-01 is expected machine type +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .config.machineType\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq 'n1-standard-1' } +end + +# Test pool-01 has expected disk size +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .config.diskSizeGb\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '30' } +end + +# Test pool-01 has expected labels +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .config.labels\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '{"all_pools_label":"something","cluster_name":"' + ENV['CLUSTER_NAME'] + '","node_pool":"pool-01","pool_01_another_label":"no","pool_01_label":"yes"}' } +end + +# Test pool-01 has expected network tags +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .config.tags\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '["gke-' + ENV['CLUSTER_NAME'] + '","gke-' + ENV['CLUSTER_NAME'] + '-pool-01","all-node-network-tag","pool-01-network-tag"]' } +end + +# Test pool-01 has auto repair enabled +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .management.autoRepair\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq 'true' } +end + +# Test pool-01 has auto upgrade disabled +describe command('gcloud --project=${PROJECT_ID} container clusters --zone=${CLUSTER_LOCATION} describe ${CLUSTER_NAME} --format=json | jq -cre \'.nodePools[] | select(.name == "pool-01") | .management.autoUpgrade\'') do + its('exit_status') { should eq 1 } + its('stdout.strip') { should eq 'null' } +end diff --git a/test/integration/gcloud/test/integration/default/inspec/kubectl.rb b/test/integration/gcloud/test/integration/default/inspec/kubectl.rb new file mode 100644 index 0000000000..ed4e077b70 --- /dev/null +++ 
b/test/integration/gcloud/test/integration/default/inspec/kubectl.rb @@ -0,0 +1,38 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Test pool-01 has expected taints +describe command('$(terraform output module_path)/scripts/kubectl_wrapper.sh https://$(terraform output endpoint_example) $(terraform output client_token | base64 --decode) $(terraform output ca_certificate_example) kubectl get nodes -o json -l node_pool=pool-01 | jq -cre \'.items[0].spec.taints\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq '[{"effect":"PreferNoSchedule","key":"all_pools_taint","value":"true"},{"effect":"PreferNoSchedule","key":"pool_01_taint","value":"true"},{"effect":"PreferNoSchedule","key":"pool_01_another_taint","value":"true"}]' } +end + +# Test kube-dns configmap created +describe command('$(terraform output module_path)/scripts/kubectl_wrapper.sh https://$(terraform output endpoint_example) $(terraform output client_token | base64 --decode) $(terraform output ca_certificate_example) kubectl -n kube-system get configmap -o json kube-dns | jq -cre \'.metadata.labels.maintained_by\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq 'terraform' } +end + +# Test ip-masq-agent configmap created +describe command('$(terraform output module_path)/scripts/kubectl_wrapper.sh https://$(terraform output endpoint_example) $(terraform output client_token | base64 --decode) $(terraform output ca_certificate_example) kubectl -n kube-system get configmap -o json ip-masq-agent | jq -cre \'.metadata.labels.maintained_by\'') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should eq 'terraform' } +end + +# Test that the nginx example service is reachable +describe command('curl -Ifs -m 10 $($(terraform output module_path)/scripts/kubectl_wrapper.sh https://$(terraform output endpoint_example) $(terraform output client_token | base64 --decode) $(terraform output ca_certificate_example) kubectl get service terraform-example -o json | jq -cre \'.status.loadBalancer.ingress[0].ip\'):8080') do + its('exit_status') { should eq 0 } + its('stdout.strip') { should include 'HTTP/1.1 200 OK' } + its('stdout.strip') { should include 'Server: nginx' } +end diff --git a/test/integration/gcloud/test/integration/default/inspec/terraform.rb b/test/integration/gcloud/test/integration/default/inspec/terraform.rb new file mode 100644 index 0000000000..03f1781c27 --- /dev/null +++ b/test/integration/gcloud/test/integration/default/inspec/terraform.rb @@ -0,0 +1,88 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Test the name output
+describe command('terraform output name_example') do
+  its('stdout.strip') { should eq ENV['CLUSTER_NAME'] }
+end
+
+# Test the type output
+describe command('terraform output type_example') do
+  its('stdout.strip') { should eq ENV['CLUSTER_TYPE'] }
+end
+
+# Test the location output
+describe command('terraform output location_example') do
+  its('stdout.strip') { should eq ENV['CLUSTER_LOCATION'] }
+end
+
+# Test the region output
+describe command('terraform output region_example') do
+  its('stdout.strip') { should eq ENV['REGION'] }
+end
+
+# Test the zones output
+describe command('terraform output -json zones_example | jq -cre \'.value\'') do
+  its('stdout.strip') { should eq '[' + ENV['ZONES'] + ']' }
+end
+
+# Test the endpoint output
+describe command('terraform output endpoint_example') do
+  its('stdout.strip') { should_not eq '' }
+end
+
+# Test the ca_certificate output
+describe command('terraform output ca_certificate_example') do
+  its('stdout.strip') { should_not eq '' }
+end
+
+# Test the min_master_version output
+describe command('terraform output min_master_version_example') do
+  its('stdout.strip') { should eq ENV['KUBERNETES_VERSION'] }
+end
+
+# Test the master_version output
+describe command('terraform output master_version_example') do
+  its('stdout.strip') { should eq ENV['KUBERNETES_VERSION'] }
+end
+
+# Test the network_policy output
+describe command('terraform output network_policy_example') do
+  its('stdout.strip') { should eq 'true' }
+end
+
+# Test the http_load_balancing_enabled output
+describe command('terraform output http_load_balancing_example') do
+  its('stdout.strip') { should eq 'false' }
+end
+
+# Test the horizontal_pod_autoscaling_enabled output
+describe command('terraform output horizontal_pod_autoscaling_example') do
+  its('stdout.strip') { should eq 'true' }
+end
+
+# Test the kubernetes_dashboard_enabled output
+describe command('terraform output kubernetes_dashboard_example') do
+  its('stdout.strip') { should eq 'true' }
+end
+
+# Test the node_pools_names output
+describe command('terraform output node_pools_names_example') do
+  its('stdout.strip') { should eq 'pool-01,' }
+end
+
+# Test the node_pools_versions output
+describe command('terraform output node_pools_versions_example') do
+  its('stdout.strip') { should eq ENV['KUBERNETES_VERSION'] + ',' }
+end
diff --git a/test/make.sh b/test/make.sh
new file mode 100755
index 0000000000..a48cd91bbf
--- /dev/null
+++ b/test/make.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This function checks to make sure that every shell script's
+# shebang line has an '-e' flag, which causes the script
+# to exit on error
+function check_bash() {
+find . -name "*.sh" -print0 | while IFS= read -r -d '' file;
+do
+  if [[ "$(head -n1 "$file")" != *"bash -e"* ]];
+  then
+    echo "$file is missing shebang with -e";
+    exit 1;
+  fi;
+done;
+}
+
+# This function makes sure that the required files for
+# releasing to OSS are present
+function basefiles() {
+  echo "Checking for required files"
+  test -f LICENSE || echo "Missing LICENSE"
+  test -f README.md || echo "Missing README.md"
+}
+
+# This function runs the hadolint linter on
+# every file named 'Dockerfile'
+function docker() {
+  echo "Running hadolint on Dockerfiles"
+  find . -name "Dockerfile" -exec hadolint {} \;
+}
+
+# This function runs 'terraform validate' against every
+# directory that contains '.tf' files
+function check_terraform() {
+  echo "Running terraform validate"
+  #shellcheck disable=SC2156
+  find . -name "*.tf" -exec bash -c 'terraform validate --check-variables=false $(dirname "{}")' \;
+}
+
+# This function runs 'go fmt' and 'go vet' on every file
+# that ends in '.go'
+function golang() {
+  echo "Running go fmt and go vet"
+  find . -name "*.go" -exec go fmt {} \;
+  find . -name "*.go" -exec go vet {} \;
+}
+
+# This function runs the flake8 linter on every file
+# ending in '.py'
+function check_python() {
+  echo "Running flake8"
+  find . -name "*.py" -exec flake8 {} \;
+}
+
+# This function runs the shellcheck linter on every
+# file ending in '.sh'
+function check_shell() {
+  echo "Running shellcheck"
+  find . -name "*.sh" -exec shellcheck -x {} \;
+}
+
+# This function makes sure that there is no trailing whitespace
+# in any files in the project.
+# There are some exclusions
+function check_trailing_whitespace() {
+  echo "Checking for trailing whitespace"
+  if grep -r '[[:blank:]]$' --exclude-dir=".terraform" --exclude="*.png" --exclude="*.pyc" --exclude-dir=".git" .; then
+    echo "The preceding lines have trailing whitespace"
+    exit 1
+  fi
+}
+
+function generate_docs() {
+  echo "Generating markdown docs with terraform-docs"
+  TMPFILE=$(mktemp)
+  for j in $(find . -type f -name "*.tf" -exec dirname {} \; | sort -u); do
+    terraform-docs markdown "$j" > "$TMPFILE"
+    python helpers/combine_docfiles.py "$j"/README.md "$TMPFILE"
+  done
+  rm -f "$TMPFILE"
+}
diff --git a/test/test_verify_boilerplate.py b/test/test_verify_boilerplate.py
new file mode 100755
index 0000000000..22a3cca055
--- /dev/null
+++ b/test/test_verify_boilerplate.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' A simple test for the verify_boilerplate python script.
+This will create a set of test files, both valid and invalid,
+and confirm that the has_valid_header call returns the correct
+value.
+
+It also checks the number of files that are found by the
+get_files call.
+'''
+from copy import deepcopy
+from tempfile import mkdtemp
+from shutil import rmtree
+import unittest
+from verify_boilerplate import has_valid_header, get_refs, get_regexs, \
+    get_args, get_files
+
+
+class AllTestCase(unittest.TestCase):
+    """
+    All of the setup, teardown, and tests are contained in this
+    class.
+    """
+
+    def write_file(self, filename, content, expected):
+        """
+        A utility method that creates test files and adds them to
+        the cases that will be tested.
+
+        Args:
+            filename: (string) the file name (path) to be created.
+            content: (list of strings) the contents of the file.
+            expected: (boolean) True if the header is expected to be valid,
+                False if not.
+        """
+        with open(filename, 'w') as handle:
+            for line in content:
+                handle.write(line + "\n")
+        self.cases[filename] = expected
+
+    def create_test_files(self, tmp_path, extension, header):
+        """
+        Creates two test files for extensions such as .tf, .xml, and .go,
+        but only one for Dockerfile and Makefile.
+
+        The difference is that Makefile and Dockerfile have no extension,
+        so creating negative test cases for them would be substantially
+        more difficult unless the files were written, deleted, and
+        re-written.
+
+        Args:
+            tmp_path: (string) the path in which to create the files
+            extension: (string) the file extension
+            header: (list of strings) the header/boilerplate content
+        """
+        content = "\n...blah \ncould be code or could be garbage\n"
+        special_cases = ["Dockerfile", "Makefile"]
+        header_template = deepcopy(header)
+        header_template.append(content)
+        valid_filename = tmp_path + extension
+        if extension not in special_cases:
+            # Invalid test cases for non-*file files (.tf|.py|.sh|.yaml|.xml..)
+            # Swap the concrete year for the 'YEAR' placeholder so the
+            # header fails the year check.
+            invalid_header = []
+            for line in header_template:
+                invalid_header.append(line.replace('2018', 'YEAR'))
+            invalid_filename = tmp_path + "invalid." + extension
+            self.write_file(invalid_filename, invalid_header, False)
+            valid_filename = tmp_path + "testfile." + extension
+
+        self.write_file(valid_filename, header_template, True)
+
+    def setUp(self):
+        """
+        Set initial counts and values, and initializes the setup of the
+        test files.
+        """
+        self.cases = {}
+        self.tmp_path = mkdtemp() + "/"
+        self.my_args = get_args()
+        self.my_refs = get_refs(self.my_args)
+        self.my_regex = get_regexs()
+        self.preexisting_file_count = len(
+            get_files(self.my_refs.keys(), self.my_args))
+        for key in self.my_refs:
+            self.create_test_files(self.tmp_path, key,
                                   self.my_refs.get(key))
+
+    def tearDown(self):
+        """ Delete the test directory. """
+        rmtree(self.tmp_path)
+
+    def test_files_headers(self):
+        """
+        Confirms that the expected output of has_valid_header is correct.
+        """
+        for case in self.cases:
+            if self.cases[case]:
+                self.assertTrue(has_valid_header(case, self.my_refs,
+                                                 self.my_regex))
+            else:
+                self.assertFalse(has_valid_header(case, self.my_refs,
+                                                  self.my_regex))
+
+    def test_invalid_count(self):
+        """
+        Test that the count of pre-existing files isn't zero, since a
+        zero count would indicate a problem with the file discovery.
+        """
+        self.assertNotEqual(self.preexisting_file_count, 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/verify_boilerplate.py b/test/verify_boilerplate.py
new file mode 100644
index 0000000000..a632fdedcc
--- /dev/null
+++ b/test/verify_boilerplate.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Verifies that all source files contain the necessary copyright boilerplate
+# snippet.
+# This is based on existing work:
+# https://github.com/kubernetes/test-infra/blob/master/hack
+# /verify_boilerplate.py
+from __future__ import print_function
+import argparse
+import glob
+import os
+import re
+import sys
+
+
+def get_args():
+    """Parses command line arguments.
+
+    Configures and runs argparse.ArgumentParser to extract command line
+    arguments.
+
+    Returns:
+        An argparse.Namespace containing the arguments parsed from the
+        command line
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument("filenames",
+                        help="list of files to check, "
+                        "all files if unspecified",
+                        nargs='*')
+    rootdir = os.path.dirname(__file__) + "/../"
+    rootdir = os.path.abspath(rootdir)
+    parser.add_argument(
+        "--rootdir",
+        default=rootdir,
+        help="root directory to examine")
+
+    default_boilerplate_dir = os.path.join(rootdir, "test/boilerplate")
+    parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
+    return parser.parse_args()
+
+
+def get_refs(ARGS):
+    """Converts the directory of boilerplate files into a map keyed by file
+    extension.
+
+    Reads each boilerplate file's contents into an array, then adds that
+    array to a map keyed by the file extension.
+
+    Args:
+        ARGS: the parsed command line arguments
+
+    Returns:
+        A map of boilerplate lines, keyed by file extension. For example,
+        boilerplate.py.txt would result in the k,v pair {".py": py_lines}
+        where py_lines is an array containing each line of the file.
+    """
+    refs = {}
+
+    # Find and iterate over the absolute path for each boilerplate template
+    for path in glob.glob(os.path.join(
+            ARGS.boilerplate_dir,
+            "boilerplate.*.txt")):
+        extension = os.path.basename(path).split(".")[1]
+        with open(path, 'r') as ref_file:
+            ref = ref_file.read().splitlines()
+        refs[extension] = ref
+    return refs
+
+
+# pylint: disable=too-many-locals
+def has_valid_header(filename, refs, regexs):
+    """Tests whether a file has the correct boilerplate header.
+
+    Tests each file against the boilerplate stored in refs for that file
+    type (based on extension), or by the entire filename (e.g. Dockerfile,
+    Makefile). Some heuristics are applied to remove build tags and
+    shebangs, but little variance in header formatting is tolerated.
+
+    Args:
+        filename: A string containing the name of the file to test
+        refs: A map of boilerplate headers, keyed by file extension
+        regexs: a map of compiled regex objects used in verifying boilerplate
+
+    Returns:
+        True if the file has the correct boilerplate header, otherwise
+        returns False.
+ """ + try: + with open(filename, 'r') as fp: # pylint: disable=invalid-name + data = fp.read() + except IOError: + return False + basename = os.path.basename(filename) + extension = get_file_extension(filename) + if extension: + ref = refs[extension] + else: + ref = refs[basename] + # remove build tags from the top of Go files + if extension == "go": + con = regexs["go_build_constraints"] + (data, found) = con.subn("", data, 1) + # remove shebang + elif extension == "sh" or extension == "py": + she = regexs["shebang"] + (data, found) = she.subn("", data, 1) + data = data.splitlines() + # if our test file is smaller than the reference it surely fails! + if len(ref) > len(data): + return False + # trim our file to the same number of lines as the reference file + data = data[:len(ref)] + year = regexs["year"] + for datum in data: + if year.search(datum): + return False + + # if we don't match the reference at this point, fail + if ref != data: + return False + return True + + +def get_file_extension(filename): + """Extracts the extension part of a filename. + + Identifies the extension as everything after the last period in filename. + + Args: + filename: string containing the filename + + Returns: + A string containing the extension in lowercase + """ + return os.path.splitext(filename)[1].split(".")[-1].lower() + + +# These directories will be omitted from header checks +SKIPPED_DIRS = [ + 'Godeps', 'third_party', '_gopath', '_output', + '.git', 'vendor', '__init__.py', 'node_modules' +] + + +def normalize_files(files): + """Extracts the files that require boilerplate checking from the files + argument. + + A new list will be built. Each path from the original files argument will + be added unless it is within one of SKIPPED_DIRS. All relative paths will + be converted to absolute paths by prepending the root_dir path parsed from + the command line, or its default value. + + Args: + files: a list of file path strings + + Returns: + A modified copy of the files list where any any path in a skipped + directory is removed, and all paths have been made absolute. + """ + newfiles = [] + for pathname in files: + if any(x in pathname for x in SKIPPED_DIRS): + continue + newfiles.append(pathname) + for idx, pathname in enumerate(newfiles): + if not os.path.isabs(pathname): + newfiles[idx] = os.path.join(ARGS.rootdir, pathname) + return newfiles + + +def get_files(extensions, ARGS): + """Generates a list of paths whose boilerplate should be verified. + + If a list of file names has been provided on the command line, it will be + treated as the initial set to search. Otherwise, all paths within rootdir + will be discovered and used as the initial set. + + Once the initial set of files is identified, it is normalized via + normalize_files() and further stripped of any file name whose extension is + not in extensions. + + Args: + extensions: a list of file extensions indicating which file types + should have their boilerplate verified + + Returns: + A list of absolute file paths + """ + files = [] + if ARGS.filenames: + files = ARGS.filenames + else: + for root, dirs, walkfiles in os.walk(ARGS.rootdir): + # don't visit certain dirs. This is just a performance improvement + # as we would prune these later in normalize_files(). 
But doing it + # cuts down the amount of filesystem walking we do and cuts down + # the size of the file list + for dpath in SKIPPED_DIRS: + if dpath in dirs: + dirs.remove(dpath) + for name in walkfiles: + pathname = os.path.join(root, name) + files.append(pathname) + files = normalize_files(files) + outfiles = [] + for pathname in files: + basename = os.path.basename(pathname) + extension = get_file_extension(pathname) + if extension in extensions or basename in extensions: + outfiles.append(pathname) + return outfiles + + +def get_regexs(): + """Builds a map of regular expressions used in boilerplate validation. + + There are two scenarios where these regexes are used. The first is in + validating the date referenced is the boilerplate, by ensuring it is an + acceptable year. The second is in identifying non-boilerplate elements, + like shebangs and compiler hints that should be ignored when validating + headers. + + Returns: + A map of compiled regular expression objects, keyed by mnemonic. + """ + regexs = {} + # Search for "YEAR" which exists in the boilerplate, but shouldn't in the + # real thing + regexs["year"] = re.compile('YEAR') + # dates can be 2014, 2015, 2016 or 2017, company holder names can be + # anything + regexs["date"] = re.compile('(2014|2015|2016|2017|2018)') + # strip // +build \n\n build constraints + regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", + re.MULTILINE) + # strip #!.* from shell/python scripts + regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) + return regexs + + +def main(args): + """Identifies and verifies files that should have the desired boilerplate. + + Retrieves the lists of files to be validated and tests each one in turn. + If all files contain correct boilerplate, this function terminates + normally. Otherwise it prints the name of each non-conforming file and + exists with a non-zero status code. + """ + regexs = get_regexs() + refs = get_refs(args) + filenames = get_files(refs.keys(), args) + nonconforming_files = [] + for filename in filenames: + if not has_valid_header(filename, refs, regexs): + nonconforming_files.append(filename) + if nonconforming_files: + print('%d files have incorrect boilerplate headers:' % len( + nonconforming_files)) + for filename in sorted(nonconforming_files): + print(os.path.relpath(filename, args.rootdir)) + sys.exit(1) + + +if __name__ == "__main__": + ARGS = get_args() + main(ARGS) diff --git a/variables.tf b/variables.tf new file mode 100644 index 0000000000..f6e9b1f192 --- /dev/null +++ b/variables.tf @@ -0,0 +1,162 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+variable "project_id" {
+  description = "The project ID to host the cluster in (required)"
+}
+
+variable "name" {
+  description = "The name of the cluster (required)"
+}
+
+variable "description" {
+  description = "The description of the cluster"
+  default     = ""
+}
+
+variable "regional" {
+  description = "Whether this is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!)"
+  default     = true
+}
+
+variable "region" {
+  description = "The region to host the cluster in (required)"
+}
+
+variable "zones" {
+  type        = "list"
+  description = "The zones to host the cluster in (optional if regional cluster / required if zonal)"
+  default     = []
+}
+
+variable "network" {
+  description = "The VPC network to host the cluster in (required)"
+}
+
+variable "network_project_id" {
+  description = "The project ID of the shared VPC's host (for shared vpc support)"
+  default     = ""
+}
+
+variable "subnetwork" {
+  description = "The subnetwork to host the cluster in (required)"
+}
+
+variable "kubernetes_version" {
+  description = "The Kubernetes version of the masters. If set to 'latest' it will pull the latest available version in the selected region."
+  default     = "1.10.6-gke.2"
+}
+
+variable "node_version" {
+  description = "The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as the master at cluster creation."
+  default     = ""
+}
+
+variable "horizontal_pod_autoscaling" {
+  description = "Enable horizontal pod autoscaling addon"
+  default     = false
+}
+
+variable "http_load_balancing" {
+  description = "Enable HTTP load balancing addon"
+  default     = true
+}
+
+variable "kubernetes_dashboard" {
+  description = "Enable Kubernetes dashboard addon"
+  default     = false
+}
+
+variable "network_policy" {
+  description = "Enable network policy addon"
+  default     = false
+}
+
+variable "maintenance_start_time" {
+  description = "Time window specified for daily maintenance operations in RFC3339 format"
+  default     = "05:00"
+}
+
+variable "ip_range_pods" {
+  description = "The secondary ip range to use for pods"
+}
+
+variable "ip_range_services" {
+  description = "The secondary ip range to use for services"
+}
+
+variable "node_pools" {
+  type        = "list"
+  description = "List of maps containing node pools"
+
+  default = [
+    {
+      name = "default-node-pool"
+    },
+  ]
+}
+
+variable "node_pools_labels" {
+  type        = "map"
+  description = "Map of maps containing node labels by node-pool name"
+
+  default = {
+    all               = {}
+    default-node-pool = {}
+  }
+}
+
+variable "node_pools_taints" {
+  type        = "map"
+  description = "Map of lists containing node taints by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "node_pools_tags" {
+  type        = "map"
+  description = "Map of lists containing node network tags by node-pool name"
+
+  default = {
+    all               = []
+    default-node-pool = []
+  }
+}
+
+variable "stub_domains" {
+  type        = "map"
+  description = "Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server"
+  default     = {}
+}
+
+variable "non_masquerade_cidrs" {
+  type        = "list"
+  description = "List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading."
+  default     = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
+}
+
+variable "ip_masq_resync_interval" {
+  description = "The interval at which the agent attempts to sync its ConfigMap file from the disk."
+  default     = "60s"
+}
+
+variable "ip_masq_link_local" {
+  description = "Whether to masquerade traffic to the link-local prefix (169.254.0.0/16)."
+  default     = "false"
+}
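
For orientation, the inputs declared in variables.tf map onto a fairly small module invocation. The following is a hypothetical usage sketch, not part of this change: the source path, project, network, subnetwork, and secondary-range names are placeholder values, and the node-pool map keys other than `name` are illustrative rather than confirmed by this diff.

module "gke" {
  # Placeholder source path; point this at wherever the module lives.
  source = "./terraform-google-kubernetes-engine"

  project_id = "my-project-id"
  name       = "example-cluster"

  regional = true
  region   = "us-central1"

  network    = "my-vpc"
  subnetwork = "my-subnet"

  # Names of existing secondary ranges on the subnetwork.
  ip_range_pods     = "my-subnet-pods"
  ip_range_services = "my-subnet-services"

  node_pools = [
    {
      name = "pool-01"

      # Illustrative keys; check the module's node pool documentation
      # for the exact supported names.
      machine_type = "n1-standard-1"
      min_count    = 1
      max_count    = 2
      disk_size_gb = 30
    },
  ]
}

Setting regional = false creates a zonal cluster instead, in which case zones must be supplied; with the defaults above, the remaining inputs (addons, labels, taints, tags, stub domains, and IP masquerade settings) keep the values declared in variables.tf.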