From 95781f5bc2cd1699966a22fa651bb1bbc8d29c16 Mon Sep 17 00:00:00 2001
From: Ihor Pukha
Date: Thu, 1 Aug 2019 19:33:37 +0300
Subject: [PATCH] Add support for Terraform 0.12 #52

---
 CHANGELOG.md | 4 +-
 Gemfile | 2 +-
 Makefile | 20 +-
 README.md | 13 +-
 examples/mysql-and-postgres/main.tf | 137 ++--
 examples/mysql-and-postgres/outputs.tf | 45 ++
 examples/mysql-and-postgres/variables.tf | 49 ++
 examples/mysql-and-postgres/versions.tf | 19 +
 helpers/combine_docfiles.py | 47 --
 helpers/terraform_docs | 694 ++++++++++++++++++
 helpers/terraform_validate | 23 +
 kitchen.yml | 14 +
 main.tf | 128 +++-
 modules/mysql/failover_replica.tf | 107 ++-
 modules/mysql/main.tf | 146 ++--
 modules/mysql/outputs.tf | 43 +-
 modules/mysql/read_replica.tf | 125 +++-
 modules/mysql/variables.tf | 142 +++-
 modules/mysql/versions.tf | 19 +
 modules/postgresql/main.tf | 144 ++--
 modules/postgresql/outputs.tf | 31 +-
 modules/postgresql/read_replica.tf | 129 ++--
 modules/postgresql/variables.tf | 105 ++-
 modules/postgresql/versions.tf | 19 +
 modules/private_service_access/main.tf | 27 +-
 modules/private_service_access/outputs.tf | 9 +-
 modules/private_service_access/variables.tf | 9 +-
 modules/private_service_access/versions.tf | 19 +
 modules/safer_mysql/main.tf | 183 ++---
 modules/safer_mysql/outputs.tf | 29 +-
 modules/safer_mysql/variables.tf | 103 ++-
 modules/safer_mysql/versions.tf | 19 +
 outputs.tf | 27 +-
 test/boilerplate/boilerplate.Makefile.txt | 2 +-
 test/boilerplate/boilerplate.go.txt | 2 +-
 test/boilerplate/boilerplate.py.txt | 2 +-
 test/boilerplate/boilerplate.rb.txt | 2 +-
 test/boilerplate/boilerplate.sh.txt | 2 +-
 test/boilerplate/boilerplate.tf.txt | 2 +-
 test/boilerplate/boilerplate.xml.txt | 2 +-
 test/boilerplate/boilerplate.yaml.txt | 2 +-
 test/boilerplate/boilerplate.yml.txt | 2 +-
 test/ci_integration.sh | 2 +-
 test/fixtures/mysql-ha/main.tf | 93 +--
 test/fixtures/mysql-ha/outputs.tf | 9 +-
 test/fixtures/mysql-ha/variables.tf | 6 +-
 test/fixtures/mysql-ha/versions.tf | 19 +
 test/fixtures/mysql-simple/main.tf | 10 +-
 test/fixtures/mysql-simple/outputs.tf | 9 +-
 test/fixtures/mysql-simple/variables.tf | 6 +-
 test/fixtures/mysql-simple/versions.tf | 19 +
 test/fixtures/postgresql-ha/main.tf | 58 +-
 test/fixtures/postgresql-ha/outputs.tf | 9 +-
 test/fixtures/postgresql-ha/variables.tf | 7 +-
 test/fixtures/postgresql-ha/versions.tf | 19 +
 test/fixtures/postgresql-simple/main.tf | 11 +-
 test/fixtures/postgresql-simple/outputs.tf | 7 +-
 test/fixtures/postgresql-simple/variables.tf | 5 +-
 test/fixtures/postgresql-simple/versions.tf | 19 +
 test/fixtures/private-service-access/main.tf | 17 +-
 .../private-service-access/outputs.tf | 7 +-
 .../private-service-access/variables.tf | 4 +-
 .../private-service-access/versions.tf | 19 +
 test/fixtures/safer-mysql-simple/main.tf | 31 +-
 test/fixtures/safer-mysql-simple/outputs.tf | 9 +-
 test/fixtures/safer-mysql-simple/variables.tf | 5 +-
 test/fixtures/safer-mysql-simple/versions.tf | 19 +
 test/fixtures/shared/terraform.tfvars | 1 -
 test/integration/mysql-ha/controls/mysql.rb | 2 +-
 test/integration/mysql-ha/inspec.yml | 2 +-
 .../mysql-simple/controls/mysql.rb | 2 +-
 test/integration/mysql-simple/inspec.yml | 2 +-
 test/integration/postgresql-ha/controls/pg.rb | 2 +-
 test/integration/postgresql-ha/inspec.yml | 2 +-
 .../postgresql-simple/controls/pg.rb | 2 +-
 test/integration/postgresql-simple/inspec.yml | 2 +-
 .../controls/peering.rb | 2 +-
 .../private-service-access/inspec.yml | 2 +-
 .../safer-mysql-simple/controls/mysql.rb | 2 +-
 .../integration/safer-mysql-simple/inspec.yml | 2 +-
 test/make.sh | 134 ++--
 test/test.sh | 23 -
 test/test_verify_boilerplate.py | 10 +-
 test/verify_boilerplate.py | 8 +-
 variables.tf | 133 ++--
 versions.tf | 19 +
 86 files changed, 2503 insertions(+), 916 deletions(-)
 create mode 100644 examples/mysql-and-postgres/outputs.tf
 create mode 100644 examples/mysql-and-postgres/variables.tf
 create mode 100644 examples/mysql-and-postgres/versions.tf
 delete mode 100644 helpers/combine_docfiles.py
 create mode 100755 helpers/terraform_docs
 create mode 100644 helpers/terraform_validate
 create mode 100644 modules/mysql/versions.tf
 create mode 100644 modules/postgresql/versions.tf
 create mode 100644 modules/private_service_access/versions.tf
 create mode 100644 modules/safer_mysql/versions.tf
 create mode 100644 test/fixtures/mysql-ha/versions.tf
 create mode 100644 test/fixtures/mysql-simple/versions.tf
 create mode 100644 test/fixtures/postgresql-ha/versions.tf
 create mode 100644 test/fixtures/postgresql-simple/versions.tf
 create mode 100644 test/fixtures/private-service-access/versions.tf
 create mode 100644 test/fixtures/safer-mysql-simple/versions.tf
 mode change 100644 => 100755 test/make.sh
 delete mode 100644 test/test.sh
 mode change 100644 => 100755 test/test_verify_boilerplate.py
 mode change 100644 => 100755 test/verify_boilerplate.py
 create mode 100644 versions.tf

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b0acccc4..267637cb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,9 +7,11 @@ project adheres to [Semantic Versioning](http://semver.org/).

 ## [Unreleased]

-### Added
+### Added
+- Added support for Terraform 0.12 [53]

 ### Changed

+[53]: https://github.com/GoogleCloudPlatform/terraform-google-sql-db/pull/53
\ No newline at end of file
diff --git a/Gemfile b/Gemfile
index 794e1877..01a94121 100644
--- a/Gemfile
+++ b/Gemfile
@@ -1,5 +1,5 @@
 source "https://rubygems.org/"

-gem "kitchen-terraform", "~> 4.0"
+gem "kitchen-terraform", "~> 4.9"
 gem "test-kitchen"
 gem 'kitchen-inspec'
diff --git a/Makefile b/Makefile
index beda9a9b..5b894fd7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2018 Google LLC
+# Copyright 2019 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,26 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template).
-# Please make sure to contribute relevant changes upstream!
- -# Make will use bash instead of sh SHELL := /usr/bin/env bash -# Docker build config variables -CREDENTIALS_PATH ?= /cft/workdir/credentials.json - - -# Docker build config variables +CREDENTIALS_PATH ?= /cft/workdir/credentials.json DOCKER_ORG := gcr.io/cloud-foundation-cicd -DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 1.0.1 +DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.3.0 DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} # All is the first target in the file so it will get picked up when you just run 'make' on its own +.PHONY: all all: check generate_docs # Run all available linters -check: check_shell check_python check_golang check_terraform check_docker check_base_files test_check_headers check_headers check_trailing_whitespace +.PHONY: check +check: check_shell check_python check_golang check_terraform check_base_files test_check_headers check_headers check_trailing_whitespace # The .PHONY directive tells make that this isn't a real target and so # the presence of a file named 'check_shell' won't cause this target to stop @@ -146,4 +140,4 @@ test_integration_docker: -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ -v $(CURDIR):/cft/workdir \ ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ - make test_integration \ No newline at end of file + /bin/bash -c "source test/ci_integration.sh && setup_environment && make test_integration" diff --git a/README.md b/README.md index 40f6d47a..4c8bbafe 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,20 @@ This module consists of the following submodules: See more details in each module's README. +## Compatibility + + This module is meant for use with Terraform 0.12. If you haven't [upgraded](https://www.terraform.io/upgrade-guides/0-12.html) + and need a Terraform 0.11.x-compatible version of this module, the last released version intended for + Terraform 0.11.x is [v1.2.0](https://registry.terraform.io/modules/GoogleCloudPlatform/sql-db/google/1.2.0). + + + ## Requirements ### Installation Dependencies -- [terraform](https://www.terraform.io/downloads.html) 0.11.x -- [terraform-provider-google](https://github.com/terraform-providers/terraform-provider-google) plugin v1.12.x +- [terraform](https://www.terraform.io/downloads.html) 0.12.x +- [terraform-provider-google](https://github.com/terraform-providers/terraform-provider-google) plugin v2.5.x ### Configure a Service Account @@ -119,7 +127,6 @@ Running flake8 Running go fmt and go vet Running terraform validate Running terraform fmt -Running hadolint on Dockerfiles Checking for required files The following lines have trailing whitespace Generating markdown docs with terraform-docs diff --git a/examples/mysql-and-postgres/main.tf b/examples/mysql-and-postgres/main.tf index 069de0bf..c3ba1bd9 100644 --- a/examples/mysql-and-postgres/main.tf +++ b/examples/mysql-and-postgres/main.tf @@ -1,11 +1,11 @@ -/* - * Copyright 2017 Google Inc. +/** + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,54 +14,29 @@ * limitations under the License. 
*/ -variable "region" { - default = "us-central1" -} - -variable "network" { - default = "default" -} - -variable "zone" { - default = "us-central1-b" -} - -variable "mysql_version" { - default = "MYSQL_5_6" -} - -variable "postgresql_version" { - default = "POSTGRES_9_6" -} - provider "google" { - region = "${var.region}" + region = var.region } provider "google-beta" { - region = "${var.region}" + region = var.region } -variable "network_name" { - default = "mysql-psql-example" +data "google_client_config" "current" { } -data "google_client_config" "current" {} - -variable "project_id" {} - resource "google_compute_network" "default" { - project = "${var.project_id}" - name = "${var.network_name}" + project = var.project_id + name = var.network_name auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - project = "${var.project_id}" - name = "${var.network_name}" + project = var.project_id + name = var.network_name ip_cidr_range = "10.127.0.0/20" - network = "${google_compute_network.default.self_link}" - region = "${var.region}" + network = google_compute_network.default.self_link + region = var.region private_ip_google_access = true } @@ -72,18 +47,22 @@ resource "random_id" "name" { module "mysql-db" { source = "../../modules/mysql" name = "example-mysql-${random_id.name.hex}" - database_version = "${var.mysql_version}" - project_id = "${var.project_id}" + database_version = var.mysql_version + project_id = var.project_id zone = "c" - ip_configuration = [{ - ipv4_enabled = true + ip_configuration = { + ipv4_enabled = true + private_network = null + require_ssl = true + authorized_networks = [ + { + name = var.network_name + value = google_compute_subnetwork.default.ip_cidr_range + }, + ] + } - authorized_networks = [{ - name = "${var.network_name}" - value = "${google_compute_subnetwork.default.ip_cidr_range}" - }] - }] database_flags = [ { @@ -96,72 +75,54 @@ module "mysql-db" { module "postgresql-db" { source = "../../modules/postgresql" name = "example-postgresql-${random_id.name.hex}" - database_version = "${var.postgresql_version}" - project_id = "${var.project_id}" + database_version = var.postgresql_version + project_id = var.project_id zone = "c" - ip_configuration = [{ - ipv4_enabled = true - - authorized_networks = [{ - name = "${var.network_name}" - value = "${google_compute_subnetwork.default.ip_cidr_range}" - }] - }] + ip_configuration = { + ipv4_enabled = true + private_network = null + require_ssl = true + authorized_networks = [ + { + name = var.network_name + value = google_compute_subnetwork.default.ip_cidr_range + }, + ] + } } // We define a connection with the VPC of the Cloud SQL instance. module "private-service-access" { source = "../../modules/private_service_access" - project_id = "${var.project_id}" - vpc_network = "${google_compute_network.default.name}" + project_id = var.project_id + vpc_network = google_compute_network.default.name } module "safer-mysql-db" { source = "../../modules/safer_mysql" name = "example-safer-mysql-${random_id.name.hex}" - database_version = "${var.mysql_version}" - project_id = "${var.project_id}" - region = "${var.region}" + database_version = var.mysql_version + project_id = var.project_id + region = var.region zone = "c" # By default, all users will be permitted to connect only via the # Cloud SQL proxy. 
- additional_users = [{ - name = "app" - }, + additional_users = [ + { + name = "app" + }, { name = "readonly" }, ] assign_public_ip = true - vpc_network = "${google_compute_network.default.self_link}" + vpc_network = google_compute_network.default.self_link // Used to enforce ordering in the creation of resources. - peering_completed = "${module.private-service-access.peering_completed}" -} - -output "mysql_conn" { - value = "${module.mysql-db.instance_connection_name}" -} - -output "mysql_user_pass" { - value = "${module.mysql-db.generated_user_password}" + peering_completed = module.private-service-access.peering_completed } -output "psql_conn" { - value = "${module.postgresql-db.instance_connection_name}" -} - -output "psql_user_pass" { - value = "${module.postgresql-db.generated_user_password}" -} -output "safer_mysql_conn" { - value = "${module.safer-mysql-db.instance_connection_name}" -} - -output "safer_mysql_user_pass" { - value = "${module.safer-mysql-db.generated_user_password}" -} diff --git a/examples/mysql-and-postgres/outputs.tf b/examples/mysql-and-postgres/outputs.tf new file mode 100644 index 00000000..451e6a37 --- /dev/null +++ b/examples/mysql-and-postgres/outputs.tf @@ -0,0 +1,45 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "mysql_conn" { + value = module.mysql-db.instance_connection_name + description = "The connection name of the master instance to be used in connection strings" +} + +output "mysql_user_pass" { + value = module.mysql-db.generated_user_password + description = "The password for the default user. If not set, a random one will be generated and available in the generated_user_password output variable." +} + +output "psql_conn" { + value = module.postgresql-db.instance_connection_name + description = "The connection name of the master instance to be used in connection strings" +} + +output "psql_user_pass" { + value = module.postgresql-db.generated_user_password + description = "The password for the default user. If not set, a random one will be generated and available in the generated_user_password output variable." +} + +output "safer_mysql_conn" { + value = module.safer-mysql-db.instance_connection_name + description = "The connection name of the master instance to be used in connection strings" +} + +output "safer_mysql_user_pass" { + value = module.safer-mysql-db.generated_user_password + description = "The password for the default user. If not set, a random one will be generated and available in the generated_user_password output variable." +} diff --git a/examples/mysql-and-postgres/variables.tf b/examples/mysql-and-postgres/variables.tf new file mode 100644 index 00000000..5e94c63e --- /dev/null +++ b/examples/mysql-and-postgres/variables.tf @@ -0,0 +1,49 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "region" { + default = "us-central1" + type = string +} + +variable "network" { + default = "default" + type = string +} + +variable "zone" { + default = "us-central1-b" + type = string +} + +variable "mysql_version" { + default = "MYSQL_5_6" + type = string +} + +variable "postgresql_version" { + default = "POSTGRES_9_6" + type = string +} + +variable "network_name" { + default = "mysql-psql-example" + type = string +} + +variable "project_id" { + type = string +} diff --git a/examples/mysql-and-postgres/versions.tf b/examples/mysql-and-postgres/versions.tf new file mode 100644 index 00000000..29704272 --- /dev/null +++ b/examples/mysql-and-postgres/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +} diff --git a/helpers/combine_docfiles.py b/helpers/combine_docfiles.py deleted file mode 100644 index 5f7a1122..00000000 --- a/helpers/combine_docfiles.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -''' Combine file from: - * script argument 1 - with content of file from: - * script argument 2 - using the beginning of line separators - hardcoded using regexes in this file: - - We exclude any text using the separate - regex specified here -''' - -import re -import sys - -insert_separator_regex = '(.*?\[\^\]\:\ \(autogen_docs_start\))(.*?)(\n\[\^\]\:\ \(autogen_docs_end\).*?$)' -exclude_separator_regex = '(.*?)Copyright 20\d\d Google LLC.*?limitations under the License.(.*?)$' - -if len(sys.argv) != 3: - sys.exit(1) - -input = open(sys.argv[1], "r").read() -replace_content = open(sys.argv[2], "r").read() - -# Exclude the specified content from the replacement content -groups = re.match(exclude_separator_regex, replace_content, re.DOTALL).groups(0) -replace_content = groups[0] + groups[1] - -# Find where to put the replacement content, overwrite the input file -groups = re.match(insert_separator_regex, input, re.DOTALL).groups(0) -output = groups[0] + replace_content + groups[2] -open(sys.argv[1], "w").write(output) diff --git a/helpers/terraform_docs b/helpers/terraform_docs new file mode 100755 index 00000000..0935b69e --- /dev/null +++ b/helpers/terraform_docs @@ -0,0 +1,694 @@ +#!/usr/bin/env bash + +set -e + +main() { + declare argv + argv=$(getopt -o a: --long args: -- "$@") || return + eval "set -- $argv" + + declare args + declare files + + for argv; do + case $argv in + (-a|--args) + shift + args="$1" + shift + ;; + (--) + shift + files="$@" + break + ;; + esac + done + + local hack_terraform_docs=$(terraform version | head -1 | grep -c 0.12) + + if [[ "$hack_terraform_docs" == "1" ]]; then + which awk 2>&1 >/dev/null || ( echo "awk is required for terraform-docs hack to work with Terraform 0.12"; exit 1) + + tmp_file_awk=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") + terraform_docs_awk "$tmp_file_awk" + terraform_docs "$tmp_file_awk" "$args" "$files" + rm -f "$tmp_file_awk" + else + terraform_docs "0" "$args" "$files" + fi + +} + +terraform_docs() { + readonly terraform_docs_awk_file="$1" + readonly args="$2" + readonly files="$3" + + declare -a paths + declare -a tfvars_files + + index=0 + + for file_with_path in $files; do + file_with_path="${file_with_path// /__REPLACED__SPACE__}" + + paths[index]=$(dirname "$file_with_path") + + if [[ "$file_with_path" == *".tfvars" ]]; then + tfvars_files+=("$file_with_path") + fi + + ((index+=1)) + done + + readonly tmp_file=$(mktemp) + readonly text_file="README.md" + + for path_uniq in $(echo "${paths[*]}" | tr ' ' '\n' | sort -u); do + path_uniq="${path_uniq//__REPLACED__SPACE__/ }" + + pushd "$path_uniq" > /dev/null + + if [[ ! 
-f "$text_file" ]]; then + popd > /dev/null + continue + fi + + if [[ "$terraform_docs_awk_file" == "0" ]]; then + terraform-docs $args md ./ > "$tmp_file" + else + # Can't append extension for mktemp, so renaming instead + tmp_file_docs=$(mktemp "${TMPDIR:-/tmp}/terraform-docs-XXXXXXXXXX") + mv "$tmp_file_docs" "$tmp_file_docs.tf" + tmp_file_docs_tf="$tmp_file_docs.tf" + + awk -f "$terraform_docs_awk_file" ./*.tf > "$tmp_file_docs_tf" + terraform-docs $args md "$tmp_file_docs_tf" > "$tmp_file" + rm -f "$tmp_file_docs_tf" + fi + + # Replace content between markers with the placeholder - https://stackoverflow.com/questions/1212799/how-do-i-extract-lines-between-two-line-delimiters-in-perl#1212834 + perl -i -ne 'if (/BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/../END OF PRE-COMMIT-TERRAFORM DOCS HOOK/) { print $_ if /BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK/; print "I_WANT_TO_BE_REPLACED\n$_" if /END OF PRE-COMMIT-TERRAFORM DOCS HOOK/;} else { print $_ }' "$text_file" + + # Replace placeholder with the content of the file + perl -i -e 'open(F, "'"$tmp_file"'"); $f = join "", ; while(<>){if (/I_WANT_TO_BE_REPLACED/) {print $f} else {print $_};}' "$text_file" + + rm -f "$tmp_file" + + popd > /dev/null + done +} + +terraform_docs_awk() { + readonly output_file=$1 + + cat <<"EOF" > $output_file +# This script converts Terraform 0.12 variables/outputs to something suitable for `terraform-docs` +# As of terraform-docs v0.6.0, HCL2 is not supported. This script is a *dirty hack* to get around it. +# https://github.com/segmentio/terraform-docs/ +# https://github.com/segmentio/terraform-docs/issues/62 + +# Script was originally found here: https://github.com/cloudposse/build-harness/blob/master/bin/terraform-docs.awk + +{ + if ( $0 ~ /\{/ ) { + braceCnt++ + } + + if ( $0 ~ /\}/ ) { + braceCnt-- + } + + # [START] variable or output block started + if ($0 ~ /^[[:space:]]*(variable|output)[[:space:]][[:space:]]*"(.*?)"/) { + # Normalize the braceCnt (should be 1 now) + braceCnt = 1 + # [CLOSE] "default" block + if (blockDefCnt > 0) { + blockDefCnt = 0 + } + blockCnt++ + print $0 + } + + # [START] multiline default statement started + if (blockCnt > 0) { + if ($0 ~ /^[[:space:]][[:space:]]*(default)[[:space:]][[:space:]]*=/) { + if ($3 ~ "null") { + print " default = \"null\"" + } else { + print $0 + blockDefCnt++ + blockDefStart=1 + } + } + } + + # [PRINT] single line "description" + if (blockCnt > 0) { + if (blockDefCnt == 0) { + if ($0 ~ /^[[:space:]][[:space:]]*description[[:space:]][[:space:]]*=/) { + # [CLOSE] "default" block + if (blockDefCnt > 0) { + blockDefCnt = 0 + } + print $0 + } + } + } + + # [PRINT] single line "type" + if (blockCnt > 0) { + if ($0 ~ /^[[:space:]][[:space:]]*type[[:space:]][[:space:]]*=/ ) { + # [CLOSE] "default" block + if (blockDefCnt > 0) { + blockDefCnt = 0 + } + type=$3 + if (type ~ "object") { + print " type = \"object\"" + } else { + # legacy quoted types: "string", "list", and "map" + if ($3 ~ /^[[:space:]]*"(.*?)"[[:space:]]*$/) { + print " type = " $3 + } else { + print " type = \"" $3 "\"" + } + } + } + } + + # [CLOSE] variable/output block + if (blockCnt > 0) { + if (braceCnt == 0 && blockCnt > 0) { + blockCnt-- + print $0 + } + } + + # [PRINT] Multiline "default" statement + if (blockCnt > 0 && blockDefCnt > 0) { + if (blockDefStart == 1) { + blockDefStart = 0 + } else { + print $0 + } + } +} +EOF + +} + +getopt() { + # pure-getopt, a drop-in replacement for GNU getopt in pure Bash. 
+ # version 1.4.3 + # + # Copyright 2012-2018 Aron Griffis + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # "Software"), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be included + # in all copies or substantial portions of the Software. + # + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + _getopt_main() { + # Returns one of the following statuses: + # 0 success + # 1 error parsing parameters + # 2 error in getopt invocation + # 3 internal error + # 4 reserved for -T + # + # For statuses 0 and 1, generates normalized and shell-quoted + # "options -- parameters" on stdout. + + declare parsed status + declare short long name flags + declare have_short=false + + # Synopsis from getopt man-page: + # + # getopt optstring parameters + # getopt [options] [--] optstring parameters + # getopt [options] -o|--options optstring [options] [--] parameters + # + # The first form can be normalized to the third form which + # _getopt_parse() understands. The second form can be recognized after + # first parse when $short hasn't been set. + + if [[ -n ${GETOPT_COMPATIBLE+isset} || $1 == [^-]* ]]; then + # Enable compatibility mode + flags=c$flags + # Normalize first to third synopsis form + set -- -o "$1" -- "${@:2}" + fi + + # First parse always uses flags=p since getopt always parses its own + # arguments effectively in this mode. + parsed=$(_getopt_parse getopt ahl:n:o:qQs:TuV \ + alternative,help,longoptions:,name:,options:,quiet,quiet-output,shell:,test,version \ + p "$@") + status=$? + if [[ $status != 0 ]]; then + if [[ $status == 1 ]]; then + echo "Try \`getopt --help' for more information." >&2 + # Since this is the first parse, convert status 1 to 2 + status=2 + fi + return $status + fi + eval "set -- $parsed" + + while [[ $# -gt 0 ]]; do + case $1 in + (-a|--alternative) + flags=a$flags ;; + + (-h|--help) + _getopt_help + return 2 # as does GNU getopt + ;; + + (-l|--longoptions) + long="$long${long:+,}$2" + shift ;; + + (-n|--name) + name=$2 + shift ;; + + (-o|--options) + short=$2 + have_short=true + shift ;; + + (-q|--quiet) + flags=q$flags ;; + + (-Q|--quiet-output) + flags=Q$flags ;; + + (-s|--shell) + case $2 in + (sh|bash) + flags=${flags//t/} ;; + (csh|tcsh) + flags=t$flags ;; + (*) + echo 'getopt: unknown shell after -s or --shell argument' >&2 + echo "Try \`getopt --help' for more information." >&2 + return 2 ;; + esac + shift ;; + + (-u|--unquoted) + flags=u$flags ;; + + (-T|--test) + return 4 ;; + + (-V|--version) + echo "pure-getopt 1.4.3" + return 0 ;; + + (--) + shift + break ;; + esac + + shift + done + + if ! $have_short; then + # $short was declared but never set, not even to an empty string. 
+ # This implies the second form in the synopsis. + if [[ $# == 0 ]]; then + echo 'getopt: missing optstring argument' >&2 + echo "Try \`getopt --help' for more information." >&2 + return 2 + fi + short=$1 + have_short=true + shift + fi + + if [[ $short == -* ]]; then + # Leading dash means generate output in place rather than reordering, + # unless we're already in compatibility mode. + [[ $flags == *c* ]] || flags=i$flags + short=${short#?} + elif [[ $short == +* ]]; then + # Leading plus means POSIXLY_CORRECT, unless we're already in + # compatibility mode. + [[ $flags == *c* ]] || flags=p$flags + short=${short#?} + fi + + # This should fire if POSIXLY_CORRECT is in the environment, even if + # it's an empty string. That's the difference between :+ and + + flags=${POSIXLY_CORRECT+p}$flags + + _getopt_parse "${name:-getopt}" "$short" "$long" "$flags" "$@" + } + + _getopt_parse() { + # Inner getopt parser, used for both first parse and second parse. + # Returns 0 for success, 1 for error parsing, 3 for internal error. + # In the case of status 1, still generates stdout with whatever could + # be parsed. + # + # $flags is a string of characters with the following meanings: + # a - alternative parsing mode + # c - GETOPT_COMPATIBLE + # i - generate output in place rather than reordering + # p - POSIXLY_CORRECT + # q - disable error reporting + # Q - disable normal output + # t - quote for csh/tcsh + # u - unquoted output + + declare name="$1" short="$2" long="$3" flags="$4" + shift 4 + + # Split $long on commas, prepend double-dashes, strip colons; + # for use with _getopt_resolve_abbrev + declare -a longarr + _getopt_split longarr "$long" + longarr=( "${longarr[@]/#/--}" ) + longarr=( "${longarr[@]%:}" ) + longarr=( "${longarr[@]%:}" ) + + # Parse and collect options and parameters + declare -a opts params + declare o alt_recycled=false error=0 + + while [[ $# -gt 0 ]]; do + case $1 in + (--) + params=( "${params[@]}" "${@:2}" ) + break ;; + + (--*=*) + o=${1%%=*} + if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then + error=1 + elif [[ ,"$long", == *,"${o#--}"::,* ]]; then + opts=( "${opts[@]}" "$o" "${1#*=}" ) + elif [[ ,"$long", == *,"${o#--}":,* ]]; then + opts=( "${opts[@]}" "$o" "${1#*=}" ) + elif [[ ,"$long", == *,"${o#--}",* ]]; then + if $alt_recycled; then o=${o#-}; fi + _getopt_err "$name: option '$o' doesn't allow an argument" + error=1 + else + echo "getopt: assertion failed (1)" >&2 + return 3 + fi + alt_recycled=false + ;; + + (--?*) + o=$1 + if ! o=$(_getopt_resolve_abbrev "$o" "${longarr[@]}"); then + error=1 + elif [[ ,"$long", == *,"${o#--}",* ]]; then + opts=( "${opts[@]}" "$o" ) + elif [[ ,"$long", == *,"${o#--}::",* ]]; then + opts=( "${opts[@]}" "$o" '' ) + elif [[ ,"$long", == *,"${o#--}:",* ]]; then + if [[ $# -ge 2 ]]; then + shift + opts=( "${opts[@]}" "$o" "$1" ) + else + if $alt_recycled; then o=${o#-}; fi + _getopt_err "$name: option '$o' requires an argument" + error=1 + fi + else + echo "getopt: assertion failed (2)" >&2 + return 3 + fi + alt_recycled=false + ;; + + (-*) + if [[ $flags == *a* ]]; then + # Alternative parsing mode! + # Try to handle as a long option if any of the following apply: + # 1. There's an equals sign in the mix -x=3 or -xy=3 + # 2. There's 2+ letters and an abbreviated long match -xy + # 3. There's a single letter and an exact long match + # 4. There's a single letter and no short match + o=${1::2} # temp for testing #4 + if [[ $1 == *=* || $1 == -?? 
|| \ + ,$long, == *,"${1#-}"[:,]* || \ + ,$short, != *,"${o#-}"[:,]* ]]; then + o=$(_getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" 2>/dev/null) + case $? in + (0) + # Unambiguous match. Let the long options parser handle + # it, with a flag to get the right error message. + set -- "-$1" "${@:2}" + alt_recycled=true + continue ;; + (1) + # Ambiguous match, generate error and continue. + _getopt_resolve_abbrev "${1%%=*}" "${longarr[@]}" >/dev/null + error=1 + shift + continue ;; + (2) + # No match, fall through to single-character check. + true ;; + (*) + echo "getopt: assertion failed (3)" >&2 + return 3 ;; + esac + fi + fi + + o=${1::2} + if [[ "$short" == *"${o#-}"::* ]]; then + if [[ ${#1} -gt 2 ]]; then + opts=( "${opts[@]}" "$o" "${1:2}" ) + else + opts=( "${opts[@]}" "$o" '' ) + fi + elif [[ "$short" == *"${o#-}":* ]]; then + if [[ ${#1} -gt 2 ]]; then + opts=( "${opts[@]}" "$o" "${1:2}" ) + elif [[ $# -ge 2 ]]; then + shift + opts=( "${opts[@]}" "$o" "$1" ) + else + _getopt_err "$name: option requires an argument -- '${o#-}'" + error=1 + fi + elif [[ "$short" == *"${o#-}"* ]]; then + opts=( "${opts[@]}" "$o" ) + if [[ ${#1} -gt 2 ]]; then + set -- "$o" "-${1:2}" "${@:2}" + fi + else + if [[ $flags == *a* ]]; then + # Alternative parsing mode! Report on the entire failed + # option. GNU includes =value but we omit it for sanity with + # very long values. + _getopt_err "$name: unrecognized option '${1%%=*}'" + else + _getopt_err "$name: invalid option -- '${o#-}'" + if [[ ${#1} -gt 2 ]]; then + set -- "$o" "-${1:2}" "${@:2}" + fi + fi + error=1 + fi ;; + + (*) + # GNU getopt in-place mode (leading dash on short options) + # overrides POSIXLY_CORRECT + if [[ $flags == *i* ]]; then + opts=( "${opts[@]}" "$1" ) + elif [[ $flags == *p* ]]; then + params=( "${params[@]}" "$@" ) + break + else + params=( "${params[@]}" "$1" ) + fi + esac + + shift + done + + if [[ $flags == *Q* ]]; then + true # generate no output + else + echo -n ' ' + if [[ $flags == *[cu]* ]]; then + printf '%s -- %s' "${opts[*]}" "${params[*]}" + else + if [[ $flags == *t* ]]; then + _getopt_quote_csh "${opts[@]}" -- "${params[@]}" + else + _getopt_quote "${opts[@]}" -- "${params[@]}" + fi + fi + echo + fi + + return $error + } + + _getopt_err() { + if [[ $flags != *q* ]]; then + printf '%s\n' "$1" >&2 + fi + } + + _getopt_resolve_abbrev() { + # Resolves an abbrevation from a list of possibilities. + # If the abbreviation is unambiguous, echoes the expansion on stdout + # and returns 0. If the abbreviation is ambiguous, prints a message on + # stderr and returns 1. (For first parse this should convert to exit + # status 2.) If there is no match at all, prints a message on stderr + # and returns 2. + declare a q="$1" + declare -a matches + shift + for a; do + if [[ $q == "$a" ]]; then + # Exact match. Squash any other partial matches. + matches=( "$a" ) + break + elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q" ]]; then + # Exact alternative match. Squash any other partial matches. + matches=( "$a" ) + break + elif [[ $a == "$q"* ]]; then + # Abbreviated match. + matches=( "${matches[@]}" "$a" ) + elif [[ $flags == *a* && $q == -[^-]* && $a == -"$q"* ]]; then + # Abbreviated alternative match. 
+ matches=( "${matches[@]}" "${a#-}" ) + fi + done + case ${#matches[@]} in + (0) + [[ $flags == *q* ]] || \ + printf "$name: unrecognized option %s\\n" >&2 \ + "$(_getopt_quote "$q")" + return 2 ;; + (1) + printf '%s' "${matches[0]}"; return 0 ;; + (*) + [[ $flags == *q* ]] || \ + printf "$name: option %s is ambiguous; possibilities: %s\\n" >&2 \ + "$(_getopt_quote "$q")" "$(_getopt_quote "${matches[@]}")" + return 1 ;; + esac + } + + _getopt_split() { + # Splits $2 at commas to build array specified by $1 + declare IFS=, + eval "$1=( \$2 )" + } + + _getopt_quote() { + # Quotes arguments with single quotes, escaping inner single quotes + declare s space q=\' + for s; do + printf "$space'%s'" "${s//$q/$q\\$q$q}" + space=' ' + done + } + + _getopt_quote_csh() { + # Quotes arguments with single quotes, escaping inner single quotes, + # bangs, backslashes and newlines + declare s i c space + for s; do + echo -n "$space'" + for ((i=0; i<${#s}; i++)); do + c=${s:i:1} + case $c in + (\\|\'|!) + echo -n "'\\$c'" ;; + ($'\n') + echo -n "\\$c" ;; + (*) + echo -n "$c" ;; + esac + done + echo -n \' + space=' ' + done + } + + _getopt_help() { + cat <<-EOT >&2 + + Usage: + getopt + getopt [options] [--] + getopt [options] -o|--options [options] [--] + + Parse command options. + + Options: + -a, --alternative allow long options starting with single - + -l, --longoptions the long options to be recognized + -n, --name the name under which errors are reported + -o, --options the short options to be recognized + -q, --quiet disable error reporting by getopt(3) + -Q, --quiet-output no normal output + -s, --shell set quoting conventions to those of + -T, --test test for getopt(1) version + -u, --unquoted do not quote the output + + -h, --help display this help and exit + -V, --version output version information and exit + + For more details see getopt(1). + EOT + } + + _getopt_version_check() { + if [[ -z $BASH_VERSION ]]; then + echo "getopt: unknown version of bash might not be compatible" >&2 + return 1 + fi + + # This is a lexical comparison that should be sufficient forever. + if [[ $BASH_VERSION < 2.05b ]]; then + echo "getopt: bash $BASH_VERSION might not be compatible" >&2 + return 1 + fi + + return 0 + } + + _getopt_version_check + _getopt_main "$@" + declare status=$? + unset -f _getopt_main _getopt_err _getopt_parse _getopt_quote \ + _getopt_quote_csh _getopt_resolve_abbrev _getopt_split _getopt_help \ + _getopt_version_check + return $status +} + +[[ $BASH_SOURCE != "$0" ]] || main "$@" diff --git a/helpers/terraform_validate b/helpers/terraform_validate new file mode 100644 index 00000000..7f609827 --- /dev/null +++ b/helpers/terraform_validate @@ -0,0 +1,23 @@ +#! /bin/bash +# +# Copyright 2019 Google LLC. This software is provided as-is, without warranty +# or representation for any use or purpose. Your use of it is subject to your +# agreement with Google. +# +# This script initializes modules so that terraform validate as of 0.12 behaves +# as expected and does not issue errors such as: +# +# Error: Module not installed +# +# on test/fixtures/shared_vpc_no_subnets/main.tf line 37: +# 37: module "project-factory" { +# +# This module is not yet installed. Run "terraform init" to install all modules +# required by this configuration. + +# The first and only argument to this script is the directory containing *.tf +# files to validate. This directory is assumed to be a root module. 
+ +cd "$1" +terraform init -backend=false +terraform validate diff --git a/kitchen.yml b/kitchen.yml index 9c235527..8e321a75 100644 --- a/kitchen.yml +++ b/kitchen.yml @@ -1,3 +1,17 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + --- provisioner: name: terraform diff --git a/main.tf b/main.tf index 042e4cd2..c2710660 100644 --- a/main.tf +++ b/main.tf @@ -1,11 +1,11 @@ -/* - * Copyright 2017 Google Inc. +/** + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,39 +15,96 @@ */ resource "google_sql_database_instance" "default" { - name = "${var.name}" - project = "${var.project}" - region = "${var.region}" - database_version = "${var.database_version}" - master_instance_name = "${var.master_instance_name}" + name = var.name + project = var.project + region = var.region + database_version = var.database_version + master_instance_name = var.master_instance_name settings { - tier = "${var.tier}" - activation_policy = "${var.activation_policy}" - authorized_gae_applications = ["${var.authorized_gae_applications}"] - disk_autoresize = "${var.disk_autoresize}" - backup_configuration = ["${var.backup_configuration}"] - ip_configuration = ["${var.ip_configuration}"] - location_preference = ["${var.location_preference}"] - maintenance_window = ["${var.maintenance_window}"] - disk_size = "${var.disk_size}" - disk_type = "${var.disk_type}" - pricing_plan = "${var.pricing_plan}" - replication_type = "${var.replication_type}" - database_flags = ["${var.database_flags}"] - availability_type = "${var.availability_type}" + tier = var.tier + activation_policy = var.activation_policy + authorized_gae_applications = var.authorized_gae_applications + disk_autoresize = var.disk_autoresize + dynamic "backup_configuration" { + for_each = [var.backup_configuration] + content { + binary_log_enabled = var.backup_configuration["binary_log_enabled"] + enabled = var.backup_configuration["enabled"] + start_time = var.backup_configuration["start_time"] + } + } + dynamic "ip_configuration" { + for_each = [var.ip_configuration] + content { + ipv4_enabled = var.ip_configuration["ipv4_enabled"] + private_network = var.ip_configuration["private_network"] + require_ssl = var.ip_configuration["require_ssl"] + + dynamic "authorized_networks" { + for_each = lookup(ip_configuration.value, "authorized_networks", []) + content { + expiration_time = authorized_networks["expiration_time"] + name = authorized_networks["name"] + value = authorized_networks["value"] + } + } + } + } + dynamic "location_preference" { + for_each = [var.location_preference] + content { + follow_gae_application = 
lookup(location_preference.value, "follow_gae_application", null) + zone = lookup(location_preference.value, "zone", null) + } + } + dynamic "maintenance_window" { + for_each = [var.maintenance_window] + content { + day = lookup(maintenance_window.value, "day", null) + hour = lookup(maintenance_window.value, "hour", null) + update_track = lookup(maintenance_window.value, "update_track", null) + } + } + disk_size = var.disk_size + disk_type = var.disk_type + pricing_plan = var.pricing_plan + replication_type = var.replication_type + dynamic "database_flags" { + for_each = var.database_flags + content { + name = lookup(database_flags.value, "name", null) + value = lookup(database_flags.value, "value", null) + } + } + availability_type = var.availability_type } - replica_configuration = ["${var.replica_configuration}"] + dynamic "replica_configuration" { + for_each = [var.replica_configuration] + content { + ca_certificate = lookup(replica_configuration.value, "ca_certificate", null) + client_certificate = lookup(replica_configuration.value, "client_certificate", null) + client_key = lookup(replica_configuration.value, "client_key", null) + connect_retry_interval = lookup(replica_configuration.value, "connect_retry_interval", null) + dump_file_path = lookup(replica_configuration.value, "dump_file_path", null) + failover_target = lookup(replica_configuration.value, "failover_target", null) + master_heartbeat_period = lookup(replica_configuration.value, "master_heartbeat_period", null) + password = lookup(replica_configuration.value, "password", null) + ssl_cipher = lookup(replica_configuration.value, "ssl_cipher", null) + username = lookup(replica_configuration.value, "username", null) + verify_server_certificate = lookup(replica_configuration.value, "verify_server_certificate", null) + } + } } resource "google_sql_database" "default" { - count = "${var.master_instance_name == "" ? 1 : 0}" - name = "${var.db_name}" - project = "${var.project}" - instance = "${google_sql_database_instance.default.name}" - charset = "${var.db_charset}" - collation = "${var.db_collation}" + count = var.master_instance_name == "" ? 1 : 0 + name = var.db_name + project = var.project + instance = google_sql_database_instance.default.name + charset = var.db_charset + collation = var.db_collation } resource "random_id" "user-password" { @@ -55,10 +112,11 @@ resource "random_id" "user-password" { } resource "google_sql_user" "default" { - count = "${var.master_instance_name == "" ? 1 : 0}" - name = "${var.user_name}" - project = "${var.project}" - instance = "${google_sql_database_instance.default.name}" - host = "${var.user_host}" - password = "${var.user_password == "" ? random_id.user-password.hex : var.user_password}" + count = var.master_instance_name == "" ? 1 : 0 + name = var.user_name + project = var.project + instance = google_sql_database_instance.default.name + host = var.user_host + password = var.user_password == "" ? random_id.user-password.hex : var.user_password } + diff --git a/modules/mysql/failover_replica.tf b/modules/mysql/failover_replica.tf index 6dd28f54..c3330717 100644 --- a/modules/mysql/failover_replica.tf +++ b/modules/mysql/failover_replica.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,58 +15,103 @@ */ locals { - failover_replica_ip_configuration_enabled = "${length(keys(var.failover_replica_ip_configuration)) > 0 ? true : false}" + failover_replica_ip_configuration_enabled = length(keys(var.failover_replica_ip_configuration)) > 0 ? true : false failover_replica_ip_configurations = { - enabled = "${var.failover_replica_ip_configuration}" - disabled = "${map()}" + enabled = var.failover_replica_ip_configuration + disabled = {} } } resource "google_sql_database_instance" "failover-replica" { - count = "${var.failover_replica ? 1 : 0}" - project = "${var.project_id}" - name = "${var.name}-failover${var.failover_replica_name_suffix}" - database_version = "${var.database_version}" - region = "${var.region}" - master_instance_name = "${google_sql_database_instance.default.name}" - replica_configuration = ["${merge(var.failover_replica_configuration, map("failover_target", true))}"] + count = var.failover_replica ? 1 : 0 + project = var.project_id + name = "${var.name}-failover${var.failover_replica_name_suffix}" + database_version = var.database_version + region = var.region + master_instance_name = google_sql_database_instance.default.name + dynamic "replica_configuration" { + for_each = [merge( + var.failover_replica_configuration, + { + "failover_target" = true + }, + )] + content { + ca_certificate = lookup(replica_configuration.value, "ca_certificate", null) + client_certificate = lookup(replica_configuration.value, "client_certificate", null) + client_key = lookup(replica_configuration.value, "client_key", null) + connect_retry_interval = lookup(replica_configuration.value, "connect_retry_interval", null) + dump_file_path = lookup(replica_configuration.value, "dump_file_path", null) + failover_target = lookup(replica_configuration.value, "failover_target", null) + master_heartbeat_period = lookup(replica_configuration.value, "master_heartbeat_period", null) + password = lookup(replica_configuration.value, "password", null) + ssl_cipher = lookup(replica_configuration.value, "ssl_cipher", null) + username = lookup(replica_configuration.value, "username", null) + verify_server_certificate = lookup(replica_configuration.value, "verify_server_certificate", null) + } + } settings { - tier = "${var.failover_replica_tier}" - activation_policy = "${var.failover_replica_activation_policy}" - authorized_gae_applications = ["${var.authorized_gae_applications}"] - ip_configuration = ["${local.failover_replica_ip_configurations["${local.failover_replica_ip_configuration_enabled ? "enabled" : "disabled"}"]}"] + tier = var.failover_replica_tier + activation_policy = var.failover_replica_activation_policy + authorized_gae_applications = var.authorized_gae_applications + dynamic "ip_configuration" { + for_each = [local.failover_replica_ip_configurations[local.failover_replica_ip_configuration_enabled ? 
"enabled" : "disabled"]] + content { + ipv4_enabled = lookup(ip_configuration.value, "ipv4_enabled", null) + private_network = lookup(ip_configuration.value, "private_network", null) + require_ssl = lookup(ip_configuration.value, "require_ssl", false) + + dynamic "authorized_networks" { + for_each = lookup(ip_configuration.value, "authorized_networks", []) + content { + expiration_time = lookup(authorized_networks.value, "expiration_time", null) + name = lookup(authorized_networks.value, "name", null) + value = lookup(authorized_networks.value, "value", null) + } + } + } + } - crash_safe_replication = "${var.failover_replica_crash_safe_replication}" - disk_autoresize = "${var.failover_replica_disk_autoresize}" - disk_size = "${var.failover_replica_disk_size}" - disk_type = "${var.failover_replica_disk_type}" - pricing_plan = "${var.failover_replica_pricing_plan}" - replication_type = "${var.failover_replica_replication_type}" - user_labels = "${var.failover_replica_user_labels}" - database_flags = ["${var.failover_replica_database_flags}"] + crash_safe_replication = var.failover_replica_crash_safe_replication + disk_autoresize = var.failover_replica_disk_autoresize + disk_size = var.failover_replica_disk_size + disk_type = var.failover_replica_disk_type + pricing_plan = var.failover_replica_pricing_plan + replication_type = var.failover_replica_replication_type + user_labels = var.failover_replica_user_labels + dynamic "database_flags" { + for_each = var.failover_replica_database_flags + content { + name = lookup(database_flags.value, "name", null) + value = lookup(database_flags.value, "value", null) + } + } location_preference { zone = "${var.region}-${var.failover_replica_zone}" } maintenance_window { - day = "${var.failover_replica_maintenance_window_day}" - hour = "${var.failover_replica_maintenance_window_hour}" - update_track = "${var.failover_replica_maintenance_window_update_track}" + day = var.failover_replica_maintenance_window_day + hour = var.failover_replica_maintenance_window_hour + update_track = var.failover_replica_maintenance_window_update_track } } - depends_on = ["google_sql_database_instance.default"] + depends_on = [google_sql_database_instance.default] lifecycle { - ignore_changes = ["disk_size"] + ignore_changes = [ + "settings[0].disk_size" + ] } timeouts { - create = "${var.create_timeout}" - update = "${var.update_timeout}" - delete = "${var.delete_timeout}" + create = var.create_timeout + update = var.update_timeout + delete = var.delete_timeout } } + diff --git a/modules/mysql/main.tf b/modules/mysql/main.tf index 2a3a7308..32124c99 100644 --- a/modules/mysql/main.tf +++ b/modules/mysql/main.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,100 +16,136 @@ locals { default_user_host = "%" - ip_configuration_enabled = "${length(keys(var.ip_configuration)) > 0 ? true : false}" + ip_configuration_enabled = length(keys(var.ip_configuration)) > 0 ? 
true : false ip_configurations = { - enabled = "${var.ip_configuration}" - disabled = "${map()}" + enabled = var.ip_configuration + disabled = {} } } resource "google_sql_database_instance" "default" { - project = "${var.project_id}" - name = "${var.name}" - database_version = "${var.database_version}" - region = "${var.region}" + project = var.project_id + name = var.name + database_version = var.database_version + region = var.region settings { - tier = "${var.tier}" - activation_policy = "${var.activation_policy}" - authorized_gae_applications = ["${var.authorized_gae_applications}"] - backup_configuration = ["${var.backup_configuration}"] - ip_configuration = ["${local.ip_configurations["${local.ip_configuration_enabled ? "enabled" : "disabled"}"]}"] - - disk_autoresize = "${var.disk_autoresize}" + tier = var.tier + activation_policy = var.activation_policy + authorized_gae_applications = var.authorized_gae_applications + dynamic "backup_configuration" { + for_each = [var.backup_configuration] + content { + binary_log_enabled = lookup(backup_configuration.value, "binary_log_enabled", false) + enabled = lookup(backup_configuration.value, "enabled", false) + start_time = lookup(backup_configuration.value, "start_time", "") + } + } + dynamic "ip_configuration" { + for_each = [local.ip_configurations[local.ip_configuration_enabled ? "enabled" : "disabled"]] + content { + ipv4_enabled = lookup(ip_configuration.value, "ipv4_enabled", null) + private_network = lookup(ip_configuration.value, "private_network", null) + require_ssl = lookup(ip_configuration.value, "require_ssl", true) + + dynamic "authorized_networks" { + for_each = lookup(ip_configuration.value, "authorized_networks", []) + content { + expiration_time = lookup(authorized_networks.value, "expiration_time", null) + name = lookup(authorized_networks.value, "name", null) + value = lookup(authorized_networks.value, "value", null) + } + } + } + } - disk_size = "${var.disk_size}" - disk_type = "${var.disk_type}" - pricing_plan = "${var.pricing_plan}" - user_labels = "${var.user_labels}" - database_flags = ["${var.database_flags}"] + disk_autoresize = var.disk_autoresize + + disk_size = var.disk_size + disk_type = var.disk_type + pricing_plan = var.pricing_plan + user_labels = var.user_labels + dynamic "database_flags" { + for_each = var.database_flags + content { + name = lookup(database_flags.value, "name", null) + value = lookup(database_flags.value, "value", null) + } + } location_preference { zone = "${var.region}-${var.zone}" } maintenance_window { - day = "${var.maintenance_window_day}" - hour = "${var.maintenance_window_hour}" - update_track = "${var.maintenance_window_update_track}" + day = var.maintenance_window_day + hour = var.maintenance_window_hour + update_track = var.maintenance_window_update_track } } lifecycle { - ignore_changes = ["disk_size"] + ignore_changes = [ + "settings[0].disk_size" + ] } timeouts { - create = "${var.create_timeout}" - update = "${var.update_timeout}" - delete = "${var.delete_timeout}" + create = var.create_timeout + update = var.update_timeout + delete = var.delete_timeout } } resource "google_sql_database" "default" { - name = "${var.db_name}" - project = "${var.project_id}" - instance = "${google_sql_database_instance.default.name}" - charset = "${var.db_charset}" - collation = "${var.db_collation}" - depends_on = ["google_sql_database_instance.default"] + name = var.db_name + project = var.project_id + instance = google_sql_database_instance.default.name + charset = var.db_charset + 
collation = var.db_collation + depends_on = [google_sql_database_instance.default] } resource "google_sql_database" "additional_databases" { - count = "${length(var.additional_databases)}" - project = "${var.project_id}" - name = "${lookup(var.additional_databases[count.index], "name")}" - charset = "${lookup(var.additional_databases[count.index], "charset", "")}" - collation = "${lookup(var.additional_databases[count.index], "collation", "")}" - instance = "${google_sql_database_instance.default.name}" - depends_on = ["google_sql_database_instance.default"] + count = length(var.additional_databases) + project = var.project_id + name = var.additional_databases[count.index]["name"] + charset = lookup(var.additional_databases[count.index], "charset", null) + collation = lookup(var.additional_databases[count.index], "collation", null) + instance = google_sql_database_instance.default.name + depends_on = [google_sql_database_instance.default] } resource "random_id" "user-password" { keepers = { - name = "${google_sql_database_instance.default.name}" + name = google_sql_database_instance.default.name } byte_length = 8 - depends_on = ["google_sql_database_instance.default"] + depends_on = [google_sql_database_instance.default] } resource "google_sql_user" "default" { - name = "${var.user_name}" - project = "${var.project_id}" - instance = "${google_sql_database_instance.default.name}" - host = "${var.user_host}" - password = "${var.user_password == "" ? random_id.user-password.hex : var.user_password}" - depends_on = ["google_sql_database_instance.default"] + name = var.user_name + project = var.project_id + instance = google_sql_database_instance.default.name + host = var.user_host + password = var.user_password == "" ? random_id.user-password.hex : var.user_password + depends_on = [google_sql_database_instance.default] } resource "google_sql_user" "additional_users" { - count = "${length(var.additional_users)}" - project = "${var.project_id}" - name = "${lookup(var.additional_users[count.index], "name")}" - password = "${lookup(var.additional_users[count.index], "password", random_id.user-password.hex)}" - host = "${lookup(var.additional_users[count.index], "host", var.user_host)}" - instance = "${google_sql_database_instance.default.name}" - depends_on = ["google_sql_database_instance.default"] + count = length(var.additional_users) + project = var.project_id + name = var.additional_users[count.index]["name"] + password = lookup( + var.additional_users[count.index], + "password", + random_id.user-password.hex, + ) + host = lookup(var.additional_users[count.index], "host", var.user_host) + instance = google_sql_database_instance.default.name + depends_on = [google_sql_database_instance.default] } + diff --git a/modules/mysql/outputs.tf b/modules/mysql/outputs.tf index 0640e0aa..f8d63a5d 100644 --- a/modules/mysql/outputs.tf +++ b/modules/mysql/outputs.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,104 +16,105 @@ // Master output "instance_name" { - value = "${google_sql_database_instance.default.name}" + value = google_sql_database_instance.default.name description = "The instance name for the master instance" } output "instance_ip_address" { - value = "${google_sql_database_instance.default.ip_address}" + value = google_sql_database_instance.default.ip_address description = "The IPv4 address assigned for the master instance" } output "instance_first_ip_address" { - value = "${google_sql_database_instance.default.first_ip_address}" + value = google_sql_database_instance.default.first_ip_address description = "The first IPv4 address of the addresses assigned for the master instance." } output "instance_connection_name" { - value = "${google_sql_database_instance.default.connection_name}" + value = google_sql_database_instance.default.connection_name description = "The connection name of the master instance to be used in connection strings" } output "instance_self_link" { - value = "${google_sql_database_instance.default.self_link}" + value = google_sql_database_instance.default.self_link description = "The URI of the master instance" } output "instance_server_ca_cert" { - value = "${google_sql_database_instance.default.server_ca_cert}" + value = google_sql_database_instance.default.server_ca_cert description = "The CA certificate information used to connect to the SQL instance via SSL" } output "instance_service_account_email_address" { - value = "${google_sql_database_instance.default.service_account_email_address}" + value = google_sql_database_instance.default.service_account_email_address description = "The service account email address assigned to the master instance" } // Replicas output "replicas_instance_first_ip_addresses" { - value = ["${google_sql_database_instance.replicas.*.ip_address}"] + value = [google_sql_database_instance.replicas.*.ip_address] description = "The first IPv4 addresses of the addresses assigned for the replica instances" } output "replicas_instance_connection_names" { - value = ["${google_sql_database_instance.replicas.*.connection_name}"] + value = [google_sql_database_instance.replicas.*.connection_name] description = "The connection names of the replica instances to be used in connection strings" } output "replicas_instance_self_links" { - value = ["${google_sql_database_instance.replicas.*.self_link}"] + value = [google_sql_database_instance.replicas.*.self_link] description = "The URIs of the replica instances" } output "replicas_instance_server_ca_certs" { - value = ["${google_sql_database_instance.replicas.*.server_ca_cert}"] + value = [google_sql_database_instance.replicas.*.server_ca_cert] description = "The CA certificates information used to connect to the replica instances via SSL" } output "replicas_instance_service_account_email_addresses" { - value = ["${google_sql_database_instance.replicas.*.service_account_email_address}"] + value = [google_sql_database_instance.replicas.*.service_account_email_address] description = "The service account email addresses assigned to the replica instances" } output "read_replica_instance_names" { - value = "${google_sql_database_instance.replicas.*.name}" + value = google_sql_database_instance.replicas.*.name description = "The instance names for the read replica instances" } // Failover Replicas output "failover-replica_instance_first_ip_address" { - value = "${google_sql_database_instance.failover-replica.*.ip_address}" + value = google_sql_database_instance.failover-replica.*.ip_address 
description = "The first IPv4 address of the addesses assigned for the failover-replica instance" } output "failover-replica_instance_connection_name" { - value = "${google_sql_database_instance.failover-replica.*.connection_name}" + value = google_sql_database_instance.failover-replica.*.connection_name description = "The connection name of the failover-replica instance to be used in connection strings" } output "failover-replica_instance_self_link" { - value = "${google_sql_database_instance.failover-replica.*.self_link}" + value = google_sql_database_instance.failover-replica.*.self_link description = "The URI of the failover-replica instance" } output "failover-replica_instance_server_ca_cert" { - value = "${google_sql_database_instance.failover-replica.*.server_ca_cert}" + value = google_sql_database_instance.failover-replica.*.server_ca_cert description = "The CA certificate information used to connect to the failover-replica instance via SSL" } output "failover-replica_instance_service_account_email_address" { - value = "${google_sql_database_instance.failover-replica.*.service_account_email_address}" + value = google_sql_database_instance.failover-replica.*.service_account_email_address description = "The service account email addresses assigned to the failover-replica instance" } output "failover-replica_instance_name" { - value = "${google_sql_database_instance.failover-replica.*.name}" + value = google_sql_database_instance.failover-replica.*.name description = "The instance name for the failover replica instance" } output "generated_user_password" { description = "The auto generated default user password if not input password was provided" - value = "${random_id.user-password.hex}" + value = random_id.user-password.hex sensitive = true } + diff --git a/modules/mysql/read_replica.tf b/modules/mysql/read_replica.tf index 4bb52d99..97e614bb 100644 --- a/modules/mysql/read_replica.tf +++ b/modules/mysql/read_replica.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,71 +15,116 @@ */ locals { - primary_zone = "${var.zone}" - read_replica_zones = ["${compact(split(",", var.read_replica_zones))}"] + primary_zone = var.zone + read_replica_zones = compact(split(",", var.read_replica_zones)) zone_mapping = { - enabled = ["${local.read_replica_zones}"] - disabled = "${list(local.primary_zone)}" + enabled = local.read_replica_zones + disabled = local.primary_zone } - zones_enabled = "${length(local.read_replica_zones) > 0}" - mod_by = "${local.zones_enabled ? length(local.read_replica_zones) : 1}" + zones_enabled = length(local.read_replica_zones) > 0 + mod_by = local.zones_enabled ? length(local.read_replica_zones) : 1 - zones = "${local.zone_mapping["${local.zones_enabled ? "enabled" : "disabled"}"]}" + zones = local.zone_mapping[local.zones_enabled ? "enabled" : "disabled"] - read_replica_ip_configuration_enabled = "${length(keys(var.read_replica_ip_configuration)) > 0 ? true : false}" + read_replica_ip_configuration_enabled = length(keys(var.read_replica_ip_configuration)) > 0 ? 
true : false read_replica_ip_configurations = { - enabled = "${var.read_replica_ip_configuration}" - disabled = "${map()}" + enabled = var.read_replica_ip_configuration + disabled = {} } } resource "google_sql_database_instance" "replicas" { - count = "${var.read_replica_size}" - project = "${var.project_id}" - name = "${var.name}-replica${var.read_replica_name_suffix}${count.index}" - database_version = "${var.database_version}" - region = "${var.region}" - master_instance_name = "${google_sql_database_instance.default.name}" - replica_configuration = ["${merge(var.read_replica_configuration, map("failover_target", false))}"] + count = var.read_replica_size + project = var.project_id + name = "${var.name}-replica${var.read_replica_name_suffix}${count.index}" + database_version = var.database_version + region = var.region + master_instance_name = google_sql_database_instance.default.name + dynamic "replica_configuration" { + for_each = [merge( + var.read_replica_configuration, + { + "failover_target" = false + }, + )] + content { + ca_certificate = lookup(replica_configuration.value, "ca_certificate", null) + client_certificate = lookup(replica_configuration.value, "client_certificate", null) + client_key = lookup(replica_configuration.value, "client_key", null) + connect_retry_interval = lookup(replica_configuration.value, "connect_retry_interval", null) + dump_file_path = lookup(replica_configuration.value, "dump_file_path", null) + failover_target = lookup(replica_configuration.value, "failover_target", null) + master_heartbeat_period = lookup(replica_configuration.value, "master_heartbeat_period", null) + password = lookup(replica_configuration.value, "password", null) + ssl_cipher = lookup(replica_configuration.value, "ssl_cipher", null) + username = lookup(replica_configuration.value, "username", null) + verify_server_certificate = lookup(replica_configuration.value, "verify_server_certificate", null) + } + } settings { - tier = "${var.read_replica_tier}" - activation_policy = "${var.read_replica_activation_policy}" - ip_configuration = ["${local.read_replica_ip_configurations["${local.read_replica_ip_configuration_enabled ? "enabled" : "disabled"}"]}"] - authorized_gae_applications = ["${var.authorized_gae_applications}"] - - crash_safe_replication = "${var.read_replica_crash_safe_replication}" - disk_autoresize = "${var.read_replica_disk_autoresize}" - disk_size = "${var.read_replica_disk_size}" - disk_type = "${var.read_replica_disk_type}" - pricing_plan = "${var.read_replica_pricing_plan}" - replication_type = "${var.read_replica_replication_type}" - user_labels = "${var.read_replica_user_labels}" - database_flags = ["${var.read_replica_database_flags}"] + tier = var.read_replica_tier + activation_policy = var.read_replica_activation_policy + dynamic "ip_configuration" { + for_each = [local.read_replica_ip_configurations[local.read_replica_ip_configuration_enabled ? 
"enabled" : "disabled"]] + content { + ipv4_enabled = lookup(ip_configuration.value, "ipv4_enabled", null) + private_network = lookup(ip_configuration.value, "private_network", null) + require_ssl = lookup(ip_configuration.value, "require_ssl", false) + + dynamic "authorized_networks" { + for_each = lookup(ip_configuration.value, "authorized_networks", []) + content { + expiration_time = lookup(authorized_networks.value, "expiration_time", null) + name = lookup(authorized_networks.value, "name", null) + value = lookup(authorized_networks.value, "value", null) + } + } + } + } + authorized_gae_applications = var.authorized_gae_applications + + crash_safe_replication = var.read_replica_crash_safe_replication + disk_autoresize = var.read_replica_disk_autoresize + disk_size = var.read_replica_disk_size + disk_type = var.read_replica_disk_type + pricing_plan = var.read_replica_pricing_plan + replication_type = var.read_replica_replication_type + user_labels = var.read_replica_user_labels + dynamic "database_flags" { + for_each = var.read_replica_database_flags + content { + name = lookup(database_flags.value, "name", null) + value = lookup(database_flags.value, "value", null) + } + } location_preference { - zone = "${length(local.zones) == 0 ? "" : "${var.region}-${local.zones[count.index % local.mod_by]}"}" + zone = length(local.zones) == 0 ? "" : "${var.region}-${local.zones[count.index % local.mod_by]}" } maintenance_window { - day = "${var.read_replica_maintenance_window_day}" - hour = "${var.read_replica_maintenance_window_hour}" - update_track = "${var.read_replica_maintenance_window_update_track}" + day = var.read_replica_maintenance_window_day + hour = var.read_replica_maintenance_window_hour + update_track = var.read_replica_maintenance_window_update_track } } - depends_on = ["google_sql_database_instance.default"] + depends_on = [google_sql_database_instance.default] lifecycle { - ignore_changes = ["disk_size"] + ignore_changes = [ + "settings[0].disk_size" + ] } timeouts { - create = "${var.create_timeout}" - update = "${var.update_timeout}" - delete = "${var.delete_timeout}" + create = var.create_timeout + update = var.update_timeout + delete = var.delete_timeout } } + diff --git a/modules/mysql/variables.tf b/modules/mysql/variables.tf index 4b2aca2f..a2d5e88e 100644 --- a/modules/mysql/variables.tf +++ b/modules/mysql/variables.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,104 +16,133 @@ variable "project_id" { description = "The project ID to manage the Cloud SQL resources" + type = string } variable "name" { + type = string description = "The name of the Cloud SQL resources" } // required variable "database_version" { description = "The database version to use" + type = string } // required variable "region" { description = "The region of the Cloud SQL resources" + type = string default = "us-central1" } // Master variable "tier" { description = "The tier for the master instance." + type = string default = "db-n1-standard-1" } variable "zone" { description = "The zone for the master instance, it should be something like: `a`, `c`." + type = string } variable "activation_policy" { description = "The activation policy for the master instance. Can be either `ALWAYS`, `NEVER` or `ON_DEMAND`." 
+ type = string default = "ALWAYS" } variable "authorized_gae_applications" { description = "The list of authorized App Engine project names" + type = list(string) default = [] } variable "disk_autoresize" { description = "Configuration to increase storage size" + type = bool default = true } variable "disk_size" { description = "The disk size for the master instance" + type = number default = 10 } variable "disk_type" { description = "The disk type for the master instance." + type = string default = "PD_SSD" } variable "pricing_plan" { description = "The pricing plan for the master instance." + type = string default = "PER_USE" } variable "maintenance_window_day" { description = "The day of week (1-7) for the master instance maintenance." + type = number default = 1 } variable "maintenance_window_hour" { description = "The hour of day (0-23) maintenance window for the master instance maintenance." + type = number default = 23 } variable "maintenance_window_update_track" { description = "The update track of maintenance window for the master instance maintenance. Can be either `canary` or `stable`." + type = string default = "canary" } variable "database_flags" { - description = "The database flags for the master instance. See [more details](https://cloud.google.com/sql/docs/mysql/flags)" + description = "List of Cloud SQL flags that are applied to the database server" + type = list(map(string)) default = [] } + variable "user_labels" { + type = map(string) default = {} description = "The key/value labels for the master instances." } variable "backup_configuration" { - default = {} - - description = < 0 ? true : false ip_configurations = { - enabled = "${var.ip_configuration}" - disabled = "${map()}" + enabled = var.ip_configuration + disabled = {} } } resource "google_sql_database_instance" "default" { - project = "${var.project_id}" - name = "${var.name}" - database_version = "${var.database_version}" - region = "${var.region}" + project = var.project_id + name = var.name + database_version = var.database_version + region = var.region settings { - tier = "${var.tier}" - activation_policy = "${var.activation_policy}" - availability_type = "${var.availability_type}" - authorized_gae_applications = ["${var.authorized_gae_applications}"] - backup_configuration = ["${var.backup_configuration}"] - ip_configuration = ["${local.ip_configurations["${local.ip_configuration_enabled ? "enabled" : "disabled"}"]}"] - - disk_autoresize = "${var.disk_autoresize}" - disk_size = "${var.disk_size}" - disk_type = "${var.disk_type}" - pricing_plan = "${var.pricing_plan}" - user_labels = "${var.user_labels}" - database_flags = ["${var.database_flags}"] + tier = var.tier + activation_policy = var.activation_policy + availability_type = var.availability_type + authorized_gae_applications = var.authorized_gae_applications + dynamic "backup_configuration" { + for_each = [var.backup_configuration] + content { + binary_log_enabled = lookup(backup_configuration.value, "binary_log_enabled", false) + enabled = lookup(backup_configuration.value, "enabled", false) + start_time = lookup(backup_configuration.value, "start_time", "") + } + } + dynamic "ip_configuration" { + for_each = [local.ip_configurations[local.ip_configuration_enabled ? 
"enabled" : "disabled"]] + content { + ipv4_enabled = lookup(ip_configuration.value, "ipv4_enabled", null) + private_network = lookup(ip_configuration.value, "private_network", null) + require_ssl = lookup(ip_configuration.value, "require_ssl", true) + + dynamic "authorized_networks" { + for_each = lookup(ip_configuration.value, "authorized_networks", []) + content { + expiration_time = lookup(authorized_networks.value, "expiration_time", null) + name = lookup(authorized_networks.value, "name", null) + value = lookup(authorized_networks.value, "value", null) + } + } + } + } + + disk_autoresize = var.disk_autoresize + disk_size = var.disk_size + disk_type = var.disk_type + pricing_plan = var.pricing_plan + user_labels = var.user_labels + dynamic "database_flags" { + for_each = var.database_flags + content { + name = lookup(database_flags.value, "name", null) + value = lookup(database_flags.value, "value", null) + } + } location_preference { zone = "${var.region}-${var.zone}" } maintenance_window { - day = "${var.maintenance_window_day}" - hour = "${var.maintenance_window_hour}" - update_track = "${var.maintenance_window_update_track}" + day = var.maintenance_window_day + hour = var.maintenance_window_hour + update_track = var.maintenance_window_update_track } } lifecycle { - ignore_changes = ["disk_size"] + ignore_changes = [ + "settings[0].disk_size" + ] } timeouts { - create = "${var.create_timeout}" - update = "${var.update_timeout}" - delete = "${var.delete_timeout}" + create = var.create_timeout + update = var.update_timeout + delete = var.delete_timeout } } resource "google_sql_database" "default" { - name = "${var.db_name}" - project = "${var.project_id}" - instance = "${google_sql_database_instance.default.name}" - charset = "${var.db_charset}" - collation = "${var.db_collation}" - depends_on = ["google_sql_database_instance.default"] + name = var.db_name + project = var.project_id + instance = google_sql_database_instance.default.name + charset = var.db_charset + collation = var.db_collation + depends_on = [google_sql_database_instance.default] } resource "google_sql_database" "additional_databases" { - count = "${length(var.additional_databases)}" - project = "${var.project_id}" - name = "${lookup(var.additional_databases[count.index], "name")}" - charset = "${lookup(var.additional_databases[count.index], "charset", "")}" - collation = "${lookup(var.additional_databases[count.index], "collation", "")}" - instance = "${google_sql_database_instance.default.name}" - depends_on = ["google_sql_database_instance.default"] + count = length(var.additional_databases) + project = var.project_id + name = var.additional_databases[count.index]["name"] + charset = lookup(var.additional_databases[count.index], "charset", "") + collation = lookup(var.additional_databases[count.index], "collation", "") + instance = google_sql_database_instance.default.name + depends_on = [google_sql_database_instance.default] } resource "random_id" "user-password" { keepers = { - name = "${google_sql_database_instance.default.name}" + name = google_sql_database_instance.default.name } byte_length = 8 - depends_on = ["google_sql_database_instance.default"] + depends_on = [google_sql_database_instance.default] } resource "google_sql_user" "default" { - name = "${var.user_name}" - project = "${var.project_id}" - instance = "${google_sql_database_instance.default.name}" - password = "${var.user_password == "" ? 
random_id.user-password.hex : var.user_password}"
- depends_on = ["google_sql_database_instance.default"]
+ name = var.user_name
+ project = var.project_id
+ instance = google_sql_database_instance.default.name
+ password = var.user_password == "" ? random_id.user-password.hex : var.user_password
+ depends_on = [google_sql_database_instance.default]
}
resource "google_sql_user" "additional_users" {
- count = "${length(var.additional_users)}"
- project = "${var.project_id}"
- name = "${lookup(var.additional_users[count.index], "name")}"
- password = "${lookup(var.additional_users[count.index], "password", random_id.user-password.hex)}"
- instance = "${google_sql_database_instance.default.name}"
- depends_on = ["google_sql_database_instance.default"]
+ count = length(var.additional_users)
+ project = var.project_id
+ name = var.additional_users[count.index]["name"]
+ password = lookup(
+ var.additional_users[count.index],
+ "password",
+ random_id.user-password.hex,
+ )
+ instance = google_sql_database_instance.default.name
+ depends_on = [google_sql_database_instance.default]
}
+
diff --git a/modules/postgresql/outputs.tf b/modules/postgresql/outputs.tf
index 4b2808d6..9abeed12 100644
--- a/modules/postgresql/outputs.tf
+++ b/modules/postgresql/outputs.tf
@@ -1,5 +1,5 @@
/**
- * Copyright 2018 Google LLC
+ * Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,73 +16,74 @@
// Master
output "instance_name" {
- value = "${google_sql_database_instance.default.name}"
+ value = google_sql_database_instance.default.name
description = "The instance name for the master instance"
}
output "instance_address" {
- value = "${google_sql_database_instance.default.ip_address}"
+ value = google_sql_database_instance.default.ip_address
description = "The IPv4 addresses assigned for the master instance"
}
output "instance_first_ip_address" {
- value = "${google_sql_database_instance.default.first_ip_address}"
+ value = google_sql_database_instance.default.first_ip_address
description = "The first IPv4 address of the addresses assigned."
}
output "instance_connection_name" {
- value = "${google_sql_database_instance.default.connection_name}"
+ value = google_sql_database_instance.default.connection_name
description = "The connection name of the master instance to be used in connection strings"
}
output "instance_self_link" {
- value = "${google_sql_database_instance.default.self_link}"
+ value = google_sql_database_instance.default.self_link
description = "The URI of the master instance"
}
output "instance_server_ca_cert" {
- value = "${google_sql_database_instance.default.server_ca_cert}"
+ value = google_sql_database_instance.default.server_ca_cert
description = "The CA certificate information used to connect to the SQL instance via SSL"
}
output "instance_service_account_email_address" {
- value = "${google_sql_database_instance.default.service_account_email_address}"
+ value = google_sql_database_instance.default.service_account_email_address
description = "The service account email address assigned to the master instance"
}
// Replicas
output "replicas_instance_ip_addresses" {
- value = ["${google_sql_database_instance.replicas.*.ip_address}"]
+ value = [google_sql_database_instance.replicas.*.ip_address]
description = "The IPv4 addresses assigned for the replica instances"
}
output "replicas_instance_connection_names" {
- value = ["${google_sql_database_instance.replicas.*.connection_name}"]
+ value = [google_sql_database_instance.replicas.*.connection_name]
description = "The connection names of the replica instances to be used in connection strings"
}
output "replicas_instance_self_links" {
- value = ["${google_sql_database_instance.replicas.*.self_link}"]
+ value = [google_sql_database_instance.replicas.*.self_link]
description = "The URIs of the replica instances"
}
output "replicas_instance_server_ca_certs" {
- value = ["${google_sql_database_instance.replicas.*.server_ca_cert}"]
+ value = [google_sql_database_instance.replicas.*.server_ca_cert]
description = "The CA certificates information used to connect to the replica instances via SSL"
}
output "replicas_instance_service_account_email_addresses" {
- value = ["${google_sql_database_instance.replicas.*.service_account_email_address}"]
+ value = [google_sql_database_instance.replicas.*.service_account_email_address]
description = "The service account email addresses assigned to the replica instances"
}
output "read_replica_instance_names" {
- value = "${google_sql_database_instance.replicas.*.name}"
+ value = google_sql_database_instance.replicas.*.name
description = "The instance names for the read replica instances"
}
output "generated_user_password" {
description = "The auto generated default user password if no input password was provided"
- value = "${random_id.user-password.hex}"
+ value = random_id.user-password.hex
sensitive = true
}
+
diff --git a/modules/postgresql/read_replica.tf b/modules/postgresql/read_replica.tf
index 6502bd14..b90c2752 100644
--- a/modules/postgresql/read_replica.tf
+++ b/modules/postgresql/read_replica.tf
@@ -1,5 +1,5 @@
/**
- * Copyright 2018 Google LLC
+ * Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,72 +15,119 @@ */ locals { - primary_zone = "${var.zone}" - read_replica_zones = ["${compact(split(",", var.read_replica_zones))}"] + primary_zone = var.zone + read_replica_zones = compact(split(",", var.read_replica_zones)) zone_mapping = { - enabled = ["${local.read_replica_zones}"] - disabled = "${list(local.primary_zone)}" + enabled = local.read_replica_zones + disabled = local.primary_zone } - zones_enabled = "${length(local.read_replica_zones) > 0}" - mod_by = "${local.zones_enabled ? length(local.read_replica_zones) : 1}" + zones_enabled = length(local.read_replica_zones) > 0 + mod_by = local.zones_enabled ? length(local.read_replica_zones) : 1 - zones = "${local.zone_mapping["${local.zones_enabled ? "enabled" : "disabled"}"]}" + zones = local.zone_mapping[local.zones_enabled ? "enabled" : "disabled"] - read_replica_ip_configuration_enabled = "${length(keys(var.read_replica_ip_configuration)) > 0 ? true : false}" + read_replica_ip_configuration_enabled = length(keys(var.read_replica_ip_configuration)) > 0 ? true : false read_replica_ip_configurations = { - enabled = "${var.read_replica_ip_configuration}" - disabled = "${map()}" + enabled = var.read_replica_ip_configuration + disabled = {} } } resource "google_sql_database_instance" "replicas" { - count = "${var.read_replica_size}" - project = "${var.project_id}" - name = "${var.name}-replica${var.read_replica_name_suffix}${count.index}" - database_version = "${var.database_version}" - region = "${var.region}" - master_instance_name = "${google_sql_database_instance.default.name}" - replica_configuration = ["${merge(var.read_replica_configuration, map("failover_target", false))}"] + count = var.read_replica_size + project = var.project_id + name = "${var.name}-replica${var.read_replica_name_suffix}${count.index}" + database_version = var.database_version + region = var.region + master_instance_name = google_sql_database_instance.default.name + dynamic "replica_configuration" { + for_each = [merge( + var.read_replica_configuration, + { + "failover_target" = false + }, + )] + content { + ca_certificate = lookup(replica_configuration.value, "ca_certificate", null) + client_certificate = lookup(replica_configuration.value, "client_certificate", null) + client_key = lookup(replica_configuration.value, "client_key", null) + connect_retry_interval = lookup(replica_configuration.value, "connect_retry_interval", null) + dump_file_path = lookup(replica_configuration.value, "dump_file_path", null) + failover_target = lookup(replica_configuration.value, "failover_target", null) + master_heartbeat_period = lookup(replica_configuration.value, "master_heartbeat_period", null) + password = lookup(replica_configuration.value, "password", null) + ssl_cipher = lookup(replica_configuration.value, "ssl_cipher", null) + username = lookup(replica_configuration.value, "username", null) + verify_server_certificate = lookup(replica_configuration.value, "verify_server_certificate", null) + } + } settings { - tier = "${var.read_replica_tier}" - activation_policy = "${var.read_replica_activation_policy}" - authorized_gae_applications = ["${var.authorized_gae_applications}"] - availability_type = "${var.read_replica_availability_type}" - ip_configuration = ["${local.read_replica_ip_configurations["${local.read_replica_ip_configuration_enabled ? 
"enabled" : "disabled"}"]}"] - - crash_safe_replication = "${var.read_replica_crash_safe_replication}" - disk_autoresize = "${var.read_replica_disk_autoresize}" - disk_size = "${var.read_replica_disk_size}" - disk_type = "${var.read_replica_disk_type}" - pricing_plan = "${var.read_replica_pricing_plan}" - replication_type = "${var.read_replica_replication_type}" - user_labels = "${var.read_replica_user_labels}" - database_flags = ["${var.read_replica_database_flags}"] + tier = var.read_replica_tier + activation_policy = var.read_replica_activation_policy + authorized_gae_applications = var.authorized_gae_applications + availability_type = var.read_replica_availability_type + dynamic "ip_configuration" { + for_each = [local.read_replica_ip_configurations[local.read_replica_ip_configuration_enabled ? "enabled" : "disabled"]] + content { + ipv4_enabled = lookup(ip_configuration.value, "ipv4_enabled", null) + private_network = lookup(ip_configuration.value, "private_network", null) + require_ssl = lookup(ip_configuration.value, "require_ssl", null) + + dynamic "authorized_networks" { + for_each = lookup(ip_configuration.value, "authorized_networks", []) + content { + expiration_time = lookup(authorized_networks.value, "expiration_time", null) + name = lookup(authorized_networks.value, "name", null) + value = lookup(authorized_networks.value, "value", null) + } + } + } + } + + crash_safe_replication = var.read_replica_crash_safe_replication + disk_autoresize = var.read_replica_disk_autoresize + disk_size = var.read_replica_disk_size + disk_type = var.read_replica_disk_type + pricing_plan = var.read_replica_pricing_plan + replication_type = var.read_replica_replication_type + user_labels = var.read_replica_user_labels + dynamic "database_flags" { + for_each = var.read_replica_database_flags + content { + name = lookup(database_flags.value, "name", null) + value = lookup(database_flags.value, "value", null) + } + } location_preference { - zone = "${length(local.zones) == 0 ? "" : "${var.region}-${local.zones[count.index % local.mod_by]}"}" + zone = length(local.zones) == 0 ? "" : "${var.region}-${local.zones[count.index % local.mod_by]}" } maintenance_window { - day = "${var.read_replica_maintenance_window_day}" - hour = "${var.read_replica_maintenance_window_hour}" - update_track = "${var.read_replica_maintenance_window_update_track}" + day = var.read_replica_maintenance_window_day + hour = var.read_replica_maintenance_window_hour + update_track = var.read_replica_maintenance_window_update_track } } - depends_on = ["google_sql_database_instance.default"] + depends_on = [google_sql_database_instance.default] + lifecycle { - ignore_changes = ["disk_size"] + ignore_changes = [ + "settings[0].disk_size" + ] } + timeouts { - create = "${var.create_timeout}" - update = "${var.update_timeout}" - delete = "${var.delete_timeout}" + create = var.create_timeout + update = var.update_timeout + delete = var.delete_timeout } } + diff --git a/modules/postgresql/variables.tf b/modules/postgresql/variables.tf index 66f29d71..24923caa 100644 --- a/modules/postgresql/variables.tf +++ b/modules/postgresql/variables.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,20 +15,24 @@
*/
variable "project_id" {
+ type = string
description = "The project ID to manage the Cloud SQL resources"
}
variable "name" {
+ type = string
description = "The name of the Cloud SQL resources"
}
// required
variable "database_version" {
description = "The database version to use"
+ type = string
}
// required
variable "region" {
+ type = string
description = "The region of the Cloud SQL resources"
default = "us-central1"
}
@@ -39,21 +43,25 @@ variable "tier" {
}
variable "zone" {
+ type = string
description = "The zone for the master instance, it should be something like: `a`, `c`."
}
variable "activation_policy" {
description = "The activation policy for the master instance. Can be either `ALWAYS`, `NEVER` or `ON_DEMAND`."
+ type = string
default = "ALWAYS"
}
variable "availability_type" {
description = "The availability type for the master instance. This is only used to set up high availability for the PostgreSQL instance. Can be either `ZONAL` or `REGIONAL`."
+ type = string
default = "ZONAL"
}
variable "disk_autoresize" {
description = "Configuration to increase storage size."
+ type = bool
default = true
}
@@ -64,60 +72,80 @@ variable "disk_size" {
variable "disk_type" {
description = "The disk type for the master instance."
+ type = string
default = "PD_SSD"
}
variable "pricing_plan" {
description = "The pricing plan for the master instance."
+ type = string
default = "PER_USE"
}
variable "maintenance_window_day" {
description = "The day of week (1-7) for the master instance maintenance."
+ type = number
default = 1
}
variable "maintenance_window_hour" {
description = "The hour of day (0-23) maintenance window for the master instance maintenance."
+ type = number
default = 23
}
variable "maintenance_window_update_track" {
description = "The update track of maintenance window for the master instance maintenance. Can be either `canary` or `stable`."
+ type = string
default = "canary"
}
variable "database_flags" {
description = "The database flags for the master instance. See [more details](https://cloud.google.com/sql/docs/mysql/flags)"
+ type = list(map(string))
default = []
}
variable "user_labels" {
description = "The key/value labels for the master instances."
+ type = map(string)
default = {}
}
variable "backup_configuration" {
- default = {}
-
- description = <&2
+ fi
+ return "${rval}"
}
# This function makes sure that the required files for
@@ -62,81 +63,110 @@
done
}
-# This function runs the hadolint linter on
-# every file named 'Dockerfile'
-function docker() {
- echo "Running hadolint on Dockerfiles"
- find_files . -name "Dockerfile" -print0 \
- | compat_xargs -0 hadolint
+function check_bash() {
+find . -name "*.sh" | while IFS= read -d '' -r file;
+do
+ if [[ "$file" != *"bash -e"* ]];
+ then
+ echo "$file is missing shebang with -e";
+ exit 1;
+ fi;
+done;
+}
+
+# This function makes sure that the required files for
+# releasing to OSS are present
+function basefiles() {
+ echo "Checking for required files"
+ test -f LICENSE || echo "Missing LICENSE"
+ test -f README.md || echo "Missing README.md"
}
# This function runs 'terraform validate' against all
-# directory paths which contain *.tf files.
+# files ending in '.tf'
+
function check_terraform() {
+ local rval=125
+ # fmt is before validate for faster feedback, validate requires terraform
+ # init which takes time.
+ echo "Running terraform fmt"
+ find_files . -name "*.tf" -exec terraform fmt -check=true -write=false {} \;
+ rval="$?"
+ if [[ "${rval}" -gt 0 ]]; then + echo "Error: terraform fmt failed with exit code ${rval}" >&2 + echo "Check the output for diffs and correct using terraform fmt " >&2 + return "${rval}" + fi echo "Running terraform validate" - find_files . -name "*.tf" -print0 \ - | compat_xargs -0 -n1 dirname \ - | sort -u \ - | grep -xv './test/fixtures/shared' \ - | compat_xargs -t -n1 terraform validate --check-variables=false + local DIRS_TF="" + local BASEPATH="" + BASEPATH="$(pwd)" + DIRS_TF=$(find_files . -not -path "./test/fixtures/shared/*" -name "*.tf" -print0 | compat_xargs -0 -n1 dirname | sort -u) + for DIR_TF in $DIRS_TF + do + # shellcheck disable=SC2164 + cd "$DIR_TF" + terraform init && terraform validate && rm -rf .terraform + # shellcheck disable=SC2164 + cd "$BASEPATH" + done } # This function runs 'go fmt' and 'go vet' on every file # that ends in '.go' function golang() { echo "Running go fmt and go vet" - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go fmt - find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go vet + find . -name "*.go" -exec go fmt {} \; + find . -name "*.go" -exec go vet {} \; } # This function runs the flake8 linter on every file # ending in '.py' function check_python() { echo "Running flake8" - find_files . -name "*.py" -print0 | compat_xargs -0 flake8 - return 0 + find . -name "*.py" -exec flake8 {} \; } # This function runs the shellcheck linter on every # file ending in '.sh' function check_shell() { echo "Running shellcheck" - find_files . -name "*.sh" -print0 | compat_xargs -0 shellcheck -x + find . -name "*.sh" -exec shellcheck -x {} \; } # This function makes sure that there is no trailing whitespace # in any files in the project. # There are some exclusions function check_trailing_whitespace() { - local rc - echo "Checking for trailing whitespace" - find_files . -print \ - | grep -v -E '\.(pyc|png)$' \ - | compat_xargs grep -H -n '[[:blank:]]$' + echo "The following lines have trailing whitespace" + grep -r '[[:blank:]]$' --exclude-dir=".terraform" --exclude-dir=".kitchen" --exclude="*.png" --exclude="*.pyc" --exclude-dir=".git" . rc=$? - if [[ ${rc} -eq 0 ]]; then - return 1 + if [ $rc = 0 ]; then + exit 1 fi } function generate_docs() { echo "Generating markdown docs with terraform-docs" - local path tmpfile - while read -r path; do - if [[ -e "${path}/README.md" ]]; then - # shellcheck disable=SC2119 - tmpfile="$(maketemp)" - echo "terraform-docs markdown ${path}" - terraform-docs markdown "${path}" > "${tmpfile}" - helpers/combine_docfiles.py "${path}"/README.md "${tmpfile}" + local pth helper_dir rval + helper_dir="$(pwd)/helpers" + while read -r pth; do + if [[ -e "${pth}/README.md" ]]; then + (cd "${pth}" || return 3; "${helper_dir}"/terraform_docs .;) + rval="$?" + if [[ "${rval}" -gt 0 ]]; then + echo "Error: terraform_docs in ${pth} exit code: ${rval}" >&2 + return "${rval}" + fi else - echo "Skipping ${path} because README.md does not exist." + echo "Skipping ${pth} because README.md does not exist." fi done < <(find_files . -name '*.tf' -print0 \ | compat_xargs -0 -n1 dirname \ | sort -u) } + function prepare_test_variables() { echo "Preparing terraform.tfvars files for integration tests" #shellcheck disable=2044 @@ -152,6 +182,6 @@ function prepare_test_variables() { function check_headers() { echo "Checking file headers" # Use the exclusion behavior of find_files - find_files . -type f -print0 \ - | compat_xargs -0 python test/verify_boilerplate.py + find_files . 
-print0 \ + | compat_xargs -0 test/verify_boilerplate.py } diff --git a/test/test.sh b/test/test.sh deleted file mode 100644 index 0fcb1601..00000000 --- a/test/test.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -bundle install -bundle exec kitchen create -bundle exec kitchen converge -bundle exec kitchen verify -bundle exec kitchen destroy diff --git a/test/test_verify_boilerplate.py b/test/test_verify_boilerplate.py old mode 100644 new mode 100755 index dd870ba5..1fa3686e --- a/test/test_verify_boilerplate.py +++ b/test/test_verify_boilerplate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,10 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Please note that this file was generated from -# [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). -# Please make sure to contribute relevant changes upstream! - ''' A simple test for the verify_boilerplate python script. This will create a set of test files, both valid and invalid, and confirm that the has_valid_header call returns the correct @@ -83,8 +79,8 @@ def create_test_files(self, tmp_path, extension, header): # Invalid test cases for non-*file files (.tf|.py|.sh|.yaml|.xml..) invalid_header = [] for line in header_template: - if "2018" in line: - invalid_header.append(line.replace('2018', 'YEAR')) + if "2019" in line: + invalid_header.append(line.replace('2019', 'YEAR')) else: invalid_header.append(line) invalid_header.append(content) diff --git a/test/verify_boilerplate.py b/test/verify_boilerplate.py old mode 100644 new mode 100755 index 21bc83f3..044321f7 --- a/test/verify_boilerplate.py +++ b/test/verify_boilerplate.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,10 +18,6 @@ # This is based on existing work # https://github.com/kubernetes/test-infra/blob/master/hack # /verify_boilerplate.py - -# Please note that this file was generated from -# [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). -# Please make sure to contribute relevant changes upstream! 
from __future__ import print_function
import argparse
import glob
@@ -246,7 +242,7 @@ def get_regexs():
regexs["year"] = re.compile('YEAR')
# dates can be 2014, 2015, 2016 or 2017, company holder names can be
# anything
- regexs["date"] = re.compile('(2014|2015|2016|2017|2018)')
+ regexs["date"] = re.compile('(2014|2015|2016|2017|2018|2019)')
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
diff --git a/variables.tf b/variables.tf
index 0f40bd29..db79a796 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,11 +1,11 @@
-/*
- * Copyright 2017 Google Inc.
+/**
+ * Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,137 +14,182 @@
* limitations under the License.
*/
-variable project {
+variable "project" {
description = "The project ID to deploy to, if not set the default provider project is used."
+ type = string
default = ""
}
-variable region {
+variable "region" {
description = "Region for cloud resources"
+ type = string
default = "us-central1"
}
-variable name {
+variable "name" {
description = "Name for the database instance. Must be unique and cannot be reused for up to one week."
+ type = string
}
-variable database_version {
+variable "database_version" {
description = "The version of the database. For example, `MYSQL_5_6` or `POSTGRES_9_6`."
+ type = string
default = "MYSQL_5_6"
}
-variable master_instance_name {
+variable "master_instance_name" {
description = "The name of the master instance to replicate"
+ type = string
default = ""
}
-variable tier {
+variable "tier" {
description = "The machine tier (First Generation) or type (Second Generation). See this page for supported tiers and pricing: https://cloud.google.com/sql/pricing"
+ type = string
default = "db-f1-micro"
}
-variable db_name {
+variable "db_name" {
description = "Name of the default database to create"
+ type = string
default = "default"
}
-variable db_charset {
+variable "db_charset" {
description = "The charset for the default database"
+ type = string
default = ""
}
-variable db_collation {
+variable "db_collation" {
description = "The collation for the default database. Example for MySQL databases: 'utf8_general_ci', and Postgres: 'en_US.UTF8'"
+ type = string
default = ""
}
-variable user_name {
+variable "user_name" {
description = "The name of the default user"
+ type = string
default = "default"
}
-variable user_host {
+variable "user_host" {
description = "The host for the default user"
+ type = string
default = "%"
}
-variable user_password {
+variable "user_password" {
description = "The password for the default user. If not set, a random one will be generated and available in the generated_user_password output variable."
+ type = string
default = ""
}
-variable activation_policy {
+variable "activation_policy" {
description = "This specifies when the instance should be active. Can be either `ALWAYS`, `NEVER` or `ON_DEMAND`."
+ type = string
default = "ALWAYS"
}
-variable authorized_gae_applications {
+variable "authorized_gae_applications" {
description = "A list of Google App Engine (GAE) project names that are allowed to access this instance."
- type = "list"
+ type = list(string)
default = []
}
-variable disk_autoresize {
+variable "disk_autoresize" {
description = "Second Generation only. Configuration to increase storage size automatically."
+ type = bool
default = true
}
-variable disk_size {
+variable "disk_size" {
description = "Second generation only. The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased."
+ type = number
default = 10
}
-variable disk_type {
+variable "disk_type" {
description = "Second generation only. The type of data disk: `PD_SSD` or `PD_HDD`."
+ type = string
default = "PD_SSD"
}
-variable pricing_plan {
+variable "pricing_plan" {
description = "First generation only. Pricing plan for this instance, can be one of `PER_USE` or `PACKAGE`."
+ type = string
default = "PER_USE"
}
-variable replication_type {
+variable "replication_type" {
description = "Replication type for this instance, can be one of `ASYNCHRONOUS` or `SYNCHRONOUS`."
+ type = string
default = "SYNCHRONOUS"
}
-variable database_flags {
+variable "database_flags" {
description = "List of Cloud SQL flags that are applied to the database server"
+ type = list(map(string))
default = []
}
-variable backup_configuration {
+variable "backup_configuration" {
description = "The backup_configuration settings subblock for the database settings"
- type = "map"
- default = {}
-}
-
-variable ip_configuration {
+ type = object({
+ binary_log_enabled = bool
+ enabled = bool
+ start_time = string
+ })
+ default = {
+ binary_log_enabled = false
+ enabled = false
+ start_time = ""
+ }
+}
+
+variable "ip_configuration" {
description = "The ip_configuration settings subblock"
- type = "list"
- default = [{}]
-}
-
-variable location_preference {
+ type = object({
+ authorized_networks = list(map(string))
+ ipv4_enabled = bool
+ private_network = string
+ require_ssl = bool
+ })
+ default = {
+ authorized_networks = []
+ ipv4_enabled = "true"
+ private_network = "true"
+ require_ssl = "false"
+ }
+}
+
+variable "location_preference" {
description = "The location_preference settings subblock"
- type = "list"
- default = []
+ type = map(string)
+ default = {}
}
-variable maintenance_window {
+variable "maintenance_window" {
description = "The maintenance_window settings subblock"
- type = "list"
- default = []
+ type = object({
+ day = number
+ hour = number
+ update_track = string
+ })
}
-variable replica_configuration {
+variable "replica_configuration" {
description = "The optional replica_configuration block for the database instance"
- type = "list"
- default = []
+ type = object({
+ connect_retry_interval = number
+ dump_file_path = string
+ })
+ default = null
}
-variable availability_type {
+variable "availability_type" {
description = "This specifies whether a PostgreSQL instance should be set up for high availability (REGIONAL) or single zone (ZONAL)."
+ type = string
default = "ZONAL"
}
+
diff --git a/versions.tf b/versions.tf
new file mode 100644
index 00000000..29704272
--- /dev/null
+++ b/versions.tf
@@ -0,0 +1,19 @@
+/**
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.12" +}
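-- 
Reviewer note (illustrative only, not part of the patch): with variables.tf converted to rich
Terraform 0.12 types, callers now pass structured objects rather than lists of maps. A minimal
sketch of invoking the root module under Terraform >= 0.12 is shown below; the module source
path, project ID, and all values are assumptions for illustration, and maintenance_window
appears to become required because its old default was removed in this change.

module "mysql_db" {
  # Hypothetical local path to this module; substitute the real source for your setup.
  source = "../terraform-google-sql-db"

  name             = "example-mysql"
  project          = "example-project"
  region           = "us-central1"
  database_version = "MYSQL_5_6"

  # Object-typed variables introduced by this change (see variables.tf above).
  maintenance_window = {
    day          = 7
    hour         = 3
    update_track = "stable"
  }

  backup_configuration = {
    binary_log_enabled = true
    enabled            = true
    start_time         = "02:00"
  }

  ip_configuration = {
    authorized_networks = []
    ipv4_enabled        = true
    private_network     = null
    require_ssl         = false
  }
}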