Refactor GKE module for Terraform 1.3 #868

Merged (10 commits) on Oct 10, 2022
blueprints/gke/binauthz/main.tf (18 changes: 8 additions & 10 deletions)

@@ -83,21 +83,19 @@ module "nat" {
 }
 
 module "cluster" {
-  source                   = "../../../modules/gke-cluster"
-  project_id               = module.project.project_id
-  name                     = "${local.prefix}cluster"
-  location                 = var.zone
-  network                  = module.vpc.self_link
-  subnetwork               = module.vpc.subnet_self_links["${var.region}/subnet"]
-  secondary_range_pods     = "pods"
-  secondary_range_services = "services"
+  source     = "../../../modules/gke-cluster"
+  project_id = module.project.project_id
+  name       = "${local.prefix}cluster"
+  location   = var.zone
+  vpc_config = {
+    network    = module.vpc.self_link
+    subnetwork = module.vpc.subnet_self_links["${var.region}/subnet"]
+  }
   private_cluster_config = {
     enable_private_nodes    = true
     enable_private_endpoint = false
     master_ipv4_cidr_block  = var.master_cidr_block
     master_global_access    = false
   }
-  workload_identity = true
 }
 
 module "cluster_nodepool" {
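The call above now feeds network, subnetwork and the secondary ranges to the module through a single vpc_config object instead of flat top-level attributes. The module's variable declarations are not part of this diff; as a rough sketch of the pattern the callers imply, a vpc_config variable built on Terraform 1.3 optional object attributes might look like this (names and defaults beyond those visible above are assumptions):

variable "vpc_config" {
  description = "VPC-level configuration for the cluster."
  type = object({
    network    = string
    subnetwork = string
    # optional() attributes may be omitted by callers; they fall back to the
    # given default, or to null when no default is provided
    secondary_range_names = optional(object({
      pods     = optional(string, "pods")
      services = optional(string, "services")
    }), {})
    master_authorized_ranges = optional(map(string))
  })
  nullable = false
}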
blueprints/gke/multi-cluster-mesh-gke-fleet-api/main.tf (35 changes: 16 additions & 19 deletions)

@@ -133,30 +133,27 @@ module "mgmt_server" {
 }
 
 module "clusters" {
-  for_each                 = var.clusters_config
-  source                   = "../../../modules/gke-cluster"
-  project_id               = module.fleet_project.project_id
-  name                     = each.key
-  location                 = var.region
-  network                  = module.svpc.self_link
-  subnetwork               = module.svpc.subnet_self_links["${var.region}/subnet-${each.key}"]
-  secondary_range_pods     = "pods"
-  secondary_range_services = "services"
+  for_each   = var.clusters_config
+  source     = "../../../modules/gke-cluster"
+  project_id = module.fleet_project.project_id
+  name       = each.key
+  location   = var.region
+  vpc_config = {
+    network    = module.svpc.self_link
+    subnetwork = module.svpc.subnet_self_links["${var.region}/subnet-${each.key}"]
+    master_authorized_ranges = merge({
+      mgmt : var.mgmt_subnet_cidr_block
+      },
+      { for key, config in var.clusters_config :
+        "pods-${key}" => config.pods_cidr_block if key != each.key
+    })
+  }
   private_cluster_config = {
     enable_private_nodes    = true
     enable_private_endpoint = true
     master_ipv4_cidr_block  = each.value.master_cidr_block
     master_global_access    = true
   }
-  master_authorized_ranges = merge({
-    mgmt : var.mgmt_subnet_cidr_block
-    },
-    { for key, config in var.clusters_config :
-      "pods-${key}" => config.pods_cidr_block if key != each.key
-  })
-  enable_autopilot  = false
-  release_channel   = "REGULAR"
-  workload_identity = true
+  release_channel = "REGULAR"
   labels = {
     mesh_id = "proj-${module.fleet_project.number}"
   }
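In the new vpc_config block, merge() builds the authorized networks map from the management subnet plus every other cluster's pod range, presumably so that mesh control-plane components running as pods in one cluster can reach the private endpoints of the others. A worked example with made-up values (these CIDRs and locals are not part of the blueprint):

locals {
  # Hypothetical values, for illustration only
  clusters_config = {
    cluster-a = { pods_cidr_block = "10.8.0.0/14" }
    cluster-b = { pods_cidr_block = "10.12.0.0/14" }
  }
  mgmt_subnet_cidr_block = "10.255.0.0/24"

  # The same expression as above, evaluated for cluster-a; the result is
  # { mgmt = "10.255.0.0/24", "pods-cluster-b" = "10.12.0.0/14" }
  # because the if clause skips the cluster's own pod range.
  example = merge(
    { mgmt : local.mgmt_subnet_cidr_block },
    { for key, config in local.clusters_config :
      "pods-${key}" => config.pods_cidr_block if key != "cluster-a"
    }
  )
}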
blueprints/gke/multitenant-fleet/gke-clusters.tf (138 changes: 57 additions & 81 deletions)

@@ -24,93 +24,69 @@ locals {
 }
 
 module "gke-cluster" {
-  source                   = "../../../modules/gke-cluster"
-  for_each                 = local.clusters
-  name                     = each.key
-  project_id               = module.gke-project-0.project_id
-  description              = each.value.description
-  location                 = each.value.location
-  network                  = var.vpc_config.vpc_self_link
-  subnetwork               = each.value.net.subnet
-  secondary_range_pods     = each.value.net.pods
-  secondary_range_services = each.value.net.services
-  labels                   = each.value.labels
-  addons = {
-    cloudrun_config                       = each.value.overrides.cloudrun_config
-    dns_cache_config                      = true
-    http_load_balancing                   = true
-    gce_persistent_disk_csi_driver_config = true
-    horizontal_pod_autoscaling            = true
-    config_connector_config               = true
-    kalm_config                           = false
-    gcp_filestore_csi_driver_config       = each.value.overrides.gcp_filestore_csi_driver_config
-    gke_backup_agent_config               = false
-    # enable only if enable_dataplane_v2 is changed to false below
-    network_policy_config = false
-    istio_config = {
-      enabled = false
-      tls     = false
+  source      = "../../../modules/gke-cluster"
+  for_each    = local.clusters
+  name        = each.key
+  project_id  = module.gke-project-0.project_id
+  description = each.value.description
+  location    = each.value.location
+  vpc_config = {
+    network    = var.vpc_config.vpc_self_link
+    subnetwork = each.value.net.subnet
+    secondary_range_names = {
+      pods     = each.value.net.pods
+      services = each.value.net.services
+    }
+    master_authorized_ranges = each.value.overrides.master_authorized_ranges
+  }
+  labels = each.value.labels
+  enable_addons = {
+    cloudrun                       = each.value.overrides.cloudrun_config
+    config_connector               = true
+    dns_cache                      = true
+    gce_persistent_disk_csi_driver = true
+    gcp_filestore_csi_driver       = each.value.overrides.gcp_filestore_csi_driver_config
+    gke_backup_agent               = false
+    horizontal_pod_autoscaling     = true
+    http_load_balancing            = true
+  }
+  enable_features = {
+    cloud_dns = var.dns_domain == null ? null : {
+      cluster_dns        = "CLOUD_DNS"
+      cluster_dns_scope  = "VPC_SCOPE"
+      cluster_dns_domain = "${each.key}.${var.dns_domain}"
+    }
+    database_encryption = (
+      each.value.overrides.database_encryption_key == null
+      ? null
+      : {
+        state    = "ENCRYPTED"
+        key_name = each.value.overrides.database_encryption_key
+      }
+    )
+    dataplane_v2         = true
+    groups_for_rbac      = var.authenticator_security_group
+    intranode_visibility = true
+    pod_security_policy  = each.value.overrides.pod_security_policy
+    resource_usage_export = {
+      dataset = module.gke-dataset-resource-usage.dataset_id
+    }
+    shielded_nodes           = true
+    vertical_pod_autoscaling = each.value.overrides.vertical_pod_autoscaling
+    workload_identity        = true
+  }
-  # change these here for all clusters if absolutely needed
-  authenticator_security_group = var.authenticator_security_group
-  enable_dataplane_v2          = true
-  enable_l4_ilb_subsetting     = false
-  enable_intranode_visibility  = true
-  enable_shielded_nodes        = true
-  workload_identity            = true
   private_cluster_config = {
     enable_private_nodes    = true
-    enable_private_endpoint = false
+    enable_private_endpoint = true
     master_ipv4_cidr_block  = each.value.net.master_range
     master_global_access    = true
-  }
-  dns_config = each.value.dns_domain == null ? null : {
-    cluster_dns        = "CLOUD_DNS"
-    cluster_dns_scope  = "VPC_SCOPE"
-    cluster_dns_domain = "${each.key}.${var.dns_domain}"
+    peering_config = var.peering_config == null ? null : {
+      export_routes = var.peering_config.export_routes
+      import_routes = var.peering_config.import_routes
+      project_id    = var.vpc_config.host_project_id
+    }
   }
   logging_config    = ["SYSTEM_COMPONENTS", "WORKLOADS"]
   monitoring_config = ["SYSTEM_COMPONENTS", "WORKLOADS"]
 
-  peering_config = var.peering_config == null ? null : {
-    export_routes = var.peering_config.export_routes
-    import_routes = var.peering_config.import_routes
-    project_id    = var.vpc_config.host_project_id
-  }
-  resource_usage_export_config = {
-    enabled = true
-    dataset = module.gke-dataset-resource-usage.dataset_id
-  }
-  # TODO: the attributes below are "primed" from project-level defaults
-  # in locals, merge defaults with cluster-level stuff
-  # TODO(jccb): change fabric module
-  database_encryption = (
-    each.value.overrides.database_encryption_key == null
-    ? {
-      enabled  = false
-      state    = null
-      key_name = null
-    }
-    : {
-      enabled  = true
-      state    = "ENCRYPTED"
-      key_name = each.value.overrides.database_encryption_key
-    }
-  )
-  default_max_pods_per_node = each.value.overrides.max_pods_per_node
-  master_authorized_ranges  = each.value.overrides.master_authorized_ranges
-  pod_security_policy       = each.value.overrides.pod_security_policy
-  release_channel           = each.value.overrides.release_channel
-  vertical_pod_autoscaling  = each.value.overrides.vertical_pod_autoscaling
-  # dynamic "cluster_autoscaling" {
-  #   for_each = each.value.cluster_autoscaling == null ? {} : { 1 = 1 }
-  #   content {
-  #     enabled    = true
-  #     cpu_min    = each.value.cluster_autoscaling.cpu_min
-  #     cpu_max    = each.value.cluster_autoscaling.cpu_max
-  #     memory_min = each.value.cluster_autoscaling.memory_min
-  #     memory_max = each.value.cluster_autoscaling.memory_max
-  #   }
-  # }
+  max_pods_per_node = each.value.overrides.max_pods_per_node
+  release_channel   = each.value.overrides.release_channel
 }
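The dozen standalone toggles the old call needed (enable_dataplane_v2, enable_shielded_nodes, workload_identity and so on) are now grouped under enable_addons and enable_features. This only stays ergonomic because Terraform 1.3 allows optional object attributes with per-attribute defaults, so callers set just the fields they want to change. A sketch of the pattern, not the module's actual variable definition:

variable "enable_features" {
  description = "Cluster features; unset attributes keep their defaults."
  type = object({
    dataplane_v2         = optional(bool, false)
    intranode_visibility = optional(bool, false)
    shielded_nodes       = optional(bool, false)
    workload_identity    = optional(bool, true)
    groups_for_rbac      = optional(string)
    cloud_dns = optional(object({
      cluster_dns        = optional(string)
      cluster_dns_scope  = optional(string)
      cluster_dns_domain = optional(string)
    }))
  })
  default  = {}
  nullable = false
}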
blueprints/networking/hub-and-spoke-peering/main.tf (34 changes: 16 additions & 18 deletions)

@@ -237,31 +237,29 @@ module "service-account-gce" {
 ################################################################################
 
 module "cluster-1" {
-  source                    = "../../../modules/gke-cluster"
-  name                      = "${local.prefix}cluster-1"
-  project_id                = module.project.project_id
-  location                  = "${var.region}-b"
-  network                   = module.vpc-spoke-2.self_link
-  subnetwork                = module.vpc-spoke-2.subnet_self_links["${var.region}/${local.prefix}spoke-2-1"]
-  secondary_range_pods      = "pods"
-  secondary_range_services  = "services"
-  default_max_pods_per_node = 32
+  source     = "../../../modules/gke-cluster"
+  name       = "${local.prefix}cluster-1"
+  project_id = module.project.project_id
+  location   = "${var.region}-b"
+  vpc_config = {
+    network    = module.vpc-spoke-2.self_link
+    subnetwork = module.vpc-spoke-2.subnet_self_links["${var.region}/${local.prefix}spoke-2-1"]
+    master_authorized_ranges = {
+      for name, range in var.ip_ranges : name => range
+    }
+  }
+  max_pods_per_node = 32
   labels = {
     environment = "test"
   }
-  master_authorized_ranges = {
-    for name, range in var.ip_ranges : name => range
-  }
   private_cluster_config = {
     enable_private_nodes    = true
     enable_private_endpoint = true
     master_ipv4_cidr_block  = var.private_service_ranges.spoke-2-cluster-1
     master_global_access    = true
-  }
-  peering_config = {
-    export_routes = true
-    import_routes = false
-    project_id    = null
+    peering_config = {
+      export_routes = true
+      import_routes = false
+    }
   }
 }

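Two details worth noting in this hunk: peering_config moves inside private_cluster_config, and the explicit project_id = null placeholder disappears, since an omitted optional attribute now defaults to null on its own. Root modules consuming the refactored module presumably also need a recent CLI; a minimal version pin would be:

terraform {
  # optional object attributes with defaults require Terraform 1.3 or later
  required_version = ">= 1.3.0"
}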
blueprints/networking/shared-vpc-gke/main.tf (31 changes: 15 additions & 16 deletions)

@@ -196,28 +196,27 @@ module "vm-bastion" {
 ################################################################################
 
 module "cluster-1" {
-  source                    = "../../../modules/gke-cluster"
-  count                     = var.cluster_create ? 1 : 0
-  name                      = "cluster-1"
-  project_id                = module.project-svc-gke.project_id
-  location                  = "${var.region}-b"
-  network                   = module.vpc-shared.self_link
-  subnetwork                = module.vpc-shared.subnet_self_links["${var.region}/gke"]
-  secondary_range_pods      = "pods"
-  secondary_range_services  = "services"
-  default_max_pods_per_node = 32
-  labels = {
-    environment = "test"
-  }
-  master_authorized_ranges = {
-    internal-vms = var.ip_ranges.gce
+  source     = "../../../modules/gke-cluster"
+  count      = var.cluster_create ? 1 : 0
+  name       = "cluster-1"
+  project_id = module.project-svc-gke.project_id
+  location   = "${var.region}-b"
+  vpc_config = {
+    network    = module.vpc-shared.self_link
+    subnetwork = module.vpc-shared.subnet_self_links["${var.region}/gke"]
+    master_authorized_ranges = {
+      internal-vms = var.ip_ranges.gce
+    }
+  }
+  max_pods_per_node = 32
   private_cluster_config = {
     enable_private_nodes    = true
     enable_private_endpoint = true
     master_ipv4_cidr_block  = var.private_service_ranges.cluster-1
     master_global_access    = true
   }
+  labels = {
+    environment = "test"
+  }
 }
 
 module "cluster-1-nodepool-1" {
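Since cluster-1 is created conditionally through count, anything else in the blueprint that refers to it has to index the module instance or collapse the zero-or-one list. A small usage sketch, assuming the module exposes an id output (the output name is not shown in this diff):

locals {
  # one() returns the single element of the list, or null when
  # var.cluster_create is false and no cluster instance exists
  cluster_id = one(module.cluster-1[*].id)
}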