main.tf
provider "aws" {
region = var.region
}
module "label" {
source = "cloudposse/label/null"
version = "0.25.0"
# This is the preferred way to add attributes. It will put "cluster" last
# after any attributes set in `var.attributes` or `context.attributes`.
# In this case, we do not care, because we are only using this instance
# of this module to create tags.
attributes = ["cluster"]
context = module.this.context
}
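
# For illustration only (hypothetical context values, not part of this example):
# with namespace = "eg" and name = "example" in module.this.context, module.label.id
# renders as "eg-example-cluster", so the discovery tag key built below becomes
# "kubernetes.io/cluster/eg-example-cluster".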

locals {
  # The specific kubernetes.io/cluster/* resource tags below are required
  # for EKS and Kubernetes to discover and manage networking resources.
  # https://aws.amazon.com/premiumsupport/knowledge-center/eks-vpc-subnet-discovery/
  # https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/deploy/subnet_discovery.md
  tags = { "kubernetes.io/cluster/${module.label.id}" = "shared" }

  # Tags required to make ALB ingress work
  # https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
  public_subnets_additional_tags = {
    "kubernetes.io/role/elb" : 1
  }
  private_subnets_additional_tags = {
    "kubernetes.io/role/internal-elb" : 1
  }

  allow_all_ingress_rule = {
    key              = "allow_all_ingress"
    type             = "ingress"
    from_port        = 0
    to_port          = 0 # from_port and to_port are ignored when protocol is "-1", but AWS warns if they are not zero
    protocol         = "-1"
    description      = "Allow all ingress"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  allow_http_ingress_rule = {
    key              = "http"
    type             = "ingress"
    from_port        = 80
    to_port          = 80
    protocol         = "tcp"
    description      = "Allow HTTP ingress"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  extra_policy_arn = "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"
}
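
# A minimal sketch, assuming you also wanted to expose HTTPS: a hypothetical rule
# in the same shape as local.allow_http_ingress_rule above. It is not used by any
# module below; it is shown only to illustrate the rule schema consumed by
# cloudposse/security-group/aws.
locals {
  allow_https_ingress_rule = {
    key              = "https"
    type             = "ingress"
    from_port        = 443
    to_port          = 443
    protocol         = "tcp"
    description      = "Allow HTTPS ingress"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}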
module "vpc" {
source = "cloudposse/vpc/aws"
version = "1.1.0"
cidr_block = var.vpc_cidr_block
tags = local.tags
context = module.this.context
}
module "subnets" {
source = "cloudposse/dynamic-subnets/aws"
version = "2.0.2"
availability_zones = var.availability_zones
vpc_id = module.vpc.vpc_id
igw_id = [module.vpc.igw_id]
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
max_nats = 1
nat_gateway_enabled = true
nat_instance_enabled = false
tags = local.tags
context = module.this.context
}
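
# Note: with max_nats = 1, the subnets module creates one public and one private
# subnet per availability zone but routes all private subnets through a single
# shared NAT gateway, which keeps this example cheap at the cost of AZ fault
# tolerance for egress.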
module "ssh_source_access" {
source = "cloudposse/security-group/aws"
version = "0.4.3"
attributes = ["ssh", "source"]
security_group_description = "Test source security group ssh access only"
create_before_destroy = true
allow_all_egress = true
rules = [local.allow_all_ingress_rule]
# rules_map = { ssh_source = [local.allow_all_ingress_rule] }
vpc_id = module.vpc.vpc_id
context = module.label.context
}
module "https_sg" {
source = "cloudposse/security-group/aws"
version = "0.4.3"
attributes = ["http"]
security_group_description = "Allow http access"
create_before_destroy = true
allow_all_egress = true
rules = [local.allow_http_ingress_rule]
vpc_id = module.vpc.vpc_id
context = module.label.context
}
module "eks_cluster" {
source = "cloudposse/eks-cluster/aws"
version = "2.2.0"
region = var.region
vpc_id = module.vpc.vpc_id
subnet_ids = module.subnets.public_subnet_ids
kubernetes_version = var.kubernetes_version
local_exec_interpreter = var.local_exec_interpreter
oidc_provider_enabled = var.oidc_provider_enabled
enabled_cluster_log_types = var.enabled_cluster_log_types
cluster_log_retention_period = var.cluster_log_retention_period
# data auth has problems destroying the auth-map
kube_data_auth_enabled = false
kube_exec_auth_enabled = true
context = module.this.context
}
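
# Background: exec auth makes the module's Kubernetes provider fetch a fresh token
# on every run (via `aws eks get-token`), whereas data auth reads a token from the
# aws_eks_cluster_auth data source, which can go stale in state and interfere with
# destroying the aws-auth ConfigMap.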
module "eks_node_group" {
source = "../../"
subnet_ids = module.this.enabled ? module.subnets.public_subnet_ids : ["filler_string_for_enabled_is_false"]
cluster_name = module.eks_cluster.eks_cluster_id
instance_types = var.instance_types
desired_size = var.desired_size
min_size = var.min_size
max_size = var.max_size
kubernetes_version = [var.kubernetes_version]
kubernetes_labels = merge(var.kubernetes_labels, { attributes = coalesce(join(module.this.delimiter, module.this.attributes), "none") })
kubernetes_taints = var.kubernetes_taints
# disk_size = var.disk_size
ec2_ssh_key_name = var.ec2_ssh_key_name
ssh_access_security_group_ids = [module.ssh_source_access.id]
associated_security_group_ids = [module.ssh_source_access.id, module.https_sg.id]
node_role_policy_arns = [local.extra_policy_arn]
update_config = var.update_config
after_cluster_joining_userdata = var.after_cluster_joining_userdata
ami_type = var.ami_type
ami_release_version = var.ami_release_version
before_cluster_joining_userdata = [var.before_cluster_joining_userdata]
context = module.this.context
# Ensure ordering of resource creation to eliminate the race conditions when applying the Kubernetes Auth ConfigMap.
# Do not create Node Group before the EKS cluster is created and the `aws-auth` Kubernetes ConfigMap is applied.
depends_on = [module.eks_cluster, module.eks_cluster.kubernetes_config_map_id]
create_before_destroy = true
node_group_terraform_timeouts = [{
create = "40m"
update = null
delete = "20m"
}]
}
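
# A minimal sketch of surfacing results from this example, assuming the standard
# output names of these Cloud Posse modules (eks_cluster_id on
# cloudposse/eks-cluster/aws; eks_node_group_arn on the node group module in this
# repository). In the full example these would normally live in outputs.tf.
output "eks_cluster_id" {
  description = "Name of the EKS cluster"
  value       = module.eks_cluster.eks_cluster_id
}

output "eks_node_group_arn" {
  description = "ARN of the EKS managed node group"
  value       = module.eks_node_group.eks_node_group_arn
}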