| no |
-| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Override default values for target groups. See workers\_group\_defaults\_defaults in local.tf for valid keys. | `any` | `{}` | no |
| [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name. | `string` | `""` | no |
| [write\_kubeconfig](#input\_write\_kubeconfig) | Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`. | `bool` | `true` | no |
@@ -296,8 +295,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [worker\_security\_group\_id](#output\_worker\_security\_group\_id) | Security group ID attached to the EKS workers. |
| [workers\_asg\_arns](#output\_workers\_asg\_arns) | IDs of the autoscaling groups containing workers. |
| [workers\_asg\_names](#output\_workers\_asg\_names) | Names of the autoscaling groups containing workers. |
-| [workers\_default\_ami\_id](#output\_workers\_default\_ami\_id) | ID of the default worker group AMI |
-| [workers\_default\_ami\_id\_windows](#output\_workers\_default\_ami\_id\_windows) | ID of the default Windows worker group AMI |
| [workers\_launch\_template\_arns](#output\_workers\_launch\_template\_arns) | ARNs of the worker launch templates. |
| [workers\_launch\_template\_ids](#output\_workers\_launch\_template\_ids) | IDs of the worker launch templates. |
| [workers\_launch\_template\_latest\_versions](#output\_workers\_launch\_template\_latest\_versions) | Latest versions of the worker launch templates. |
diff --git a/aws_auth.tf b/aws_auth.tf
index 864db3abd5..2eeed22cde 100644
--- a/aws_auth.tf
+++ b/aws_auth.tf
@@ -9,11 +9,7 @@ locals {
),
index,
)}"
- platform = lookup(
- var.worker_groups[index],
- "platform",
- local.workers_group_defaults["platform"]
- )
+ platform = lookup(var.worker_groups[index], "platform", var.default_platform)
}
]
diff --git a/data.tf b/data.tf
index bf710f4e07..3edd0149b8 100644
--- a/data.tf
+++ b/data.tf
@@ -75,7 +75,7 @@ data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
name = lookup(
var.worker_groups[count.index],
"iam_instance_profile_name",
- local.workers_group_defaults["iam_instance_profile_name"],
+ null # TODO need default
)
}
diff --git a/locals.tf b/locals.tf
index d100fd46e2..227fb3756c 100644
--- a/locals.tf
+++ b/locals.tf
@@ -16,11 +16,8 @@ locals {
# Worker groups
worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
- default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
- default_ami_id_linux = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]
- default_ami_id_windows = local.workers_group_defaults.ami_id_windows != "" ? local.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0]
-
- worker_groups_platforms = [for x in var.worker_groups : try(x.platform, var.workers_group_defaults["platform"], var.default_platform)]
+ default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
+ worker_groups_platforms = [for x in var.worker_groups : try(x.platform, var.default_platform)]
worker_ami_name_filter = coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*")
worker_ami_name_filter_windows = coalesce(var.worker_ami_name_filter_windows, "Windows_Server-2019-English-Core-EKS_Optimized-${coalesce(var.cluster_version, "cluster_version")}-*")
@@ -30,135 +27,6 @@ locals {
client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
- workers_group_defaults_defaults = {
- name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
- tags = [] # A list of maps defining extra tags to be applied to the worker group autoscaling group and volumes.
- ami_id = "" # AMI ID for the eks linux based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
- ami_id_windows = "" # AMI ID for the eks windows based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
- asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group. Changing this value does not affect the autoscaling group's desired capacity, because the cluster-autoscaler manages scaling the nodes up and down: it adds nodes when pods are in a pending state and removes them when they are no longer required, by modifying the desired_capacity of the autoscaling group. Note that an issue exists in which changing asg_min_size also modifies the value of asg_desired_capacity.
- asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
- asg_min_size = "1" # Minimum worker capacity in the autoscaling group. NOTE: A change to this parameter affects asg_desired_capacity; e.g. changing its value to 2 changes asg_desired_capacity to 2, but bringing it back to 1 will not affect asg_desired_capacity.
- asg_force_delete = false # Enable forced deletion for the autoscaling group.
- asg_initial_lifecycle_hooks = [] # Initial lifecycle hooks for the autoscaling group.
- default_cooldown = null # The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
- health_check_type = null # Controls how health checking is done. Valid values are "EC2" or "ELB".
- health_check_grace_period = null # Time in seconds after instance comes into service before checking health.
- instance_type = "m4.large" # Size of the workers instances.
- instance_store_virtual_name = "ephemeral0" # "virtual_name" of the instance store volume.
- spot_price = "" # Cost of spot instance.
- placement_tenancy = "" # The tenancy of the instance. Valid values are "default" or "dedicated".
- root_volume_size = "100" # root volume size of workers instances.
- root_volume_type = "gp2" # root volume type of workers instances, can be "standard", "gp3", "gp2", or "io1"
- root_iops = "0" # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
- root_volume_throughput = null # The amount of throughput to provision for a gp3 volume.
- key_name = "" # The key pair name that should be used for the instances in the autoscaling group
- pre_userdata = "" # userdata to pre-append to the default userdata.
- userdata_template_file = "" # alternate template to use for userdata
- userdata_template_extra_args = {} # Additional arguments to use when expanding the userdata template file
- bootstrap_extra_args = "" # Extra arguments passed to the bootstrap.sh script from the EKS AMI (Amazon Machine Image).
- additional_userdata = "" # userdata to append to the default userdata.
- ebs_optimized = true # sets whether to use ebs optimization on supported types.
- enable_monitoring = true # Enables/disables detailed monitoring.
- enclave_support = false # Enables/disables enclave support
- public_ip = false # Associate a public ip address with a worker
- kubelet_extra_args = "" # This string is passed directly to kubelet if set. Useful for adding labels or taints.
- subnets = var.subnets # A list of subnets to place the worker nodes in. i.e. ["subnet-123", "subnet-456", "subnet-789"]
- additional_security_group_ids = [] # A list of additional security group ids to include in worker launch config
- protect_from_scale_in = false # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
- iam_instance_profile_name = "" # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
- iam_role_id = "local.default_iam_role_id" # A custom IAM role id. Incompatible with iam_instance_profile_name. Literal local.default_iam_role_id will never be used but if iam_role_id is not set, the local.default_iam_role_id interpolation will be used.
- suspended_processes = ["AZRebalance"] # A list of processes to suspend. i.e. ["AZRebalance", "HealthCheck", "ReplaceUnhealthy"]
- target_group_arns = null # A list of Application LoadBalancer (ALB) target group ARNs to be associated to the autoscaling group
- load_balancers = null # A list of Classic LoadBalancer (CLB)'s name to be associated to the autoscaling group
- enabled_metrics = [] # A list of metrics to be collected i.e. ["GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity"]
- placement_group = null # The name of the placement group into which to launch the instances, if any.
- service_linked_role_arn = "" # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS
- termination_policies = [] # A list of policies to decide how the instances in the auto scale group should be terminated.
- platform = var.default_platform # Platform of workers. Either "linux" or "windows".
- additional_ebs_volumes = [] # A list of additional volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), volume_size, volume_type, iops, throughput, encrypted, kms_key_id (only on launch-template), delete_on_termination, snapshot_id. Optional values are grabbed from root volume or from defaults
- additional_instance_store_volumes = [] # A list of additional instance store (local disk) volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), virtual_name.
- warm_pool = null # If this block is configured, add a Warm Pool to the specified Auto Scaling group.
- timeouts = {} # A map of timeouts for create/update/delete operations
- snapshot_id = null # A custom snapshot ID.
-
- # Settings for launch templates
- root_block_device_name = concat(data.aws_ami.eks_worker.*.root_device_name, [""])[0] # Root device name for Linux workers. If not provided, will assume default Linux AMI was used.
- root_block_device_name_windows = concat(data.aws_ami.eks_worker_windows.*.root_device_name, [""])[0] # Root device name for Windows workers. If not provided, will assume default Windows AMI was used.
- root_kms_key_id = "" # The KMS key to use when encrypting the root storage device
- launch_template_id = null # The id of the launch template used for managed node_groups
- launch_template_version = "$Latest" # The latest version of the launch template to use in the autoscaling group
- update_default_version = false # Update the autoscaling group launch template's default version upon each update
- launch_template_placement_tenancy = "default" # The placement tenancy for instances
- launch_template_placement_group = null # The name of the placement group into which to launch the instances, if any.
- root_encrypted = false # Whether the volume should be encrypted or not
- eni_delete = true # Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying)
- interface_type = null # The type of network interface. To create an Elastic Fabric Adapter (EFA), specify 'efa'.
- cpu_credits = "standard" # T2/T3 unlimited mode, can be 'standard' or 'unlimited'. Used 'standard' mode as default to avoid paying higher costs
- market_type = null
- metadata_http_endpoint = "enabled" # The state of the metadata service: enabled, disabled.
- metadata_http_tokens = "optional" # If session tokens are required: optional, required.
- metadata_http_put_response_hop_limit = null # The desired HTTP PUT response hop limit for instance metadata requests.
- # Settings for launch templates with mixed instances policy
- override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] # A list of override instance types for mixed instances policy
- on_demand_allocation_strategy = null # Strategy to use when launching on-demand instances. Valid values: prioritized.
- on_demand_base_capacity = "0" # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
- on_demand_percentage_above_base_capacity = "0" # Percentage split between on-demand and Spot instances above the base on-demand capacity
- spot_allocation_strategy = "lowest-price" # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
- spot_instance_pools = 10 # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
- spot_max_price = "" # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
- max_instance_lifetime = 0 # Maximum number of seconds instances can run in the ASG. 0 is unlimited.
- elastic_inference_accelerator = null # Type of elastic inference accelerator to be attached. Example values are eia1.medium, eia2.large, etc.
- instance_refresh_enabled = false # Enable instance refresh for the worker autoscaling group.
- instance_refresh_strategy = "Rolling" # Strategy to use for instance refresh. Default is 'Rolling', which is the only valid value.
- instance_refresh_min_healthy_percentage = 90 # The amount of capacity in the ASG that must remain healthy during an instance refresh, as a percentage of the ASG's desired capacity.
- instance_refresh_instance_warmup = null # The number of seconds until a newly launched instance is configured and ready to use. Defaults to the ASG's health check grace period.
- instance_refresh_triggers = [] # Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy.
- capacity_rebalance = false # Enable capacity rebalance
- }
-
- workers_group_defaults = merge(
- local.workers_group_defaults_defaults,
- var.workers_group_defaults,
- )
-
- ebs_optimized_not_supported = [
- "c1.medium",
- "c3.8xlarge",
- "c3.large",
- "c5d.12xlarge",
- "c5d.24xlarge",
- "c5d.metal",
- "cc2.8xlarge",
- "cr1.8xlarge",
- "g2.8xlarge",
- "g4dn.metal",
- "hs1.8xlarge",
- "i2.8xlarge",
- "m1.medium",
- "m1.small",
- "m2.xlarge",
- "m3.large",
- "m3.medium",
- "m5ad.16xlarge",
- "m5ad.8xlarge",
- "m5dn.metal",
- "m5n.metal",
- "r3.8xlarge",
- "r3.large",
- "r5ad.16xlarge",
- "r5ad.8xlarge",
- "r5dn.metal",
- "r5n.metal",
- "t1.micro",
- "t2.2xlarge",
- "t2.large",
- "t2.medium",
- "t2.micro",
- "t2.nano",
- "t2.small",
- "t2.xlarge"
- ]
-
kubeconfig = var.create_eks ? templatefile("${path.module}/templates/kubeconfig.tpl", {
kubeconfig_name = coalesce(var.kubeconfig_name, "eks_${var.cluster_name}")
endpoint = local.cluster_endpoint
@@ -175,41 +43,21 @@ locals {
lookup(
var.worker_groups[index],
"userdata_template_file",
- lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"]) == "windows"
+ lookup(var.worker_groups[index], "platform", var.default_platform) == "windows"
? "${path.module}/templates/userdata_windows.tpl"
: "${path.module}/templates/userdata.sh.tpl"
),
merge({
- platform = lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"])
- cluster_name = local.cluster_name
- endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
- pre_userdata = lookup(
- var.worker_groups[index],
- "pre_userdata",
- local.workers_group_defaults["pre_userdata"],
- )
- additional_userdata = lookup(
- var.worker_groups[index],
- "additional_userdata",
- local.workers_group_defaults["additional_userdata"],
- )
- bootstrap_extra_args = lookup(
- var.worker_groups[index],
- "bootstrap_extra_args",
- local.workers_group_defaults["bootstrap_extra_args"],
- )
- kubelet_extra_args = lookup(
- var.worker_groups[index],
- "kubelet_extra_args",
- local.workers_group_defaults["kubelet_extra_args"],
- )
+ platform = lookup(var.worker_groups[index], "platform", var.default_platform)
+ cluster_name = local.cluster_name
+ endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+ pre_userdata = lookup(var.worker_groups[index], "pre_userdata", "")
+ additional_userdata = lookup(var.worker_groups[index], "additional_userdata", "")
+ bootstrap_extra_args = lookup(var.worker_groups[index], "bootstrap_extra_args", "")
+ kubelet_extra_args = lookup(var.worker_groups[index], "kubelet_extra_args", "")
},
- lookup(
- var.worker_groups[index],
- "userdata_template_extra_args",
- local.workers_group_defaults["userdata_template_extra_args"]
- )
+ lookup(var.worker_groups[index], "userdata_template_extra_args", {})
)
)
]
diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md
index e0bf4f3180..fcba6dbf29 100644
--- a/modules/node_groups/README.md
+++ b/modules/node_groups/README.md
@@ -96,13 +96,11 @@ No modules.
| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
-| [ebs\_optimized\_not\_supported](#input\_ebs\_optimized\_not\_supported) | List of instance types that do not support EBS optimization | `list(string)` | `[]` | no |
| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | `{}` | no |
## Outputs
diff --git a/modules/node_groups/launch_template.tf b/modules/node_groups/launch_template.tf
index 6abe358d5a..3972f8f005 100644
--- a/modules/node_groups/launch_template.tf
+++ b/modules/node_groups/launch_template.tf
@@ -51,7 +51,7 @@ resource "aws_launch_template" "workers" {
}
}
- ebs_optimized = lookup(each.value, "ebs_optimized", !contains(var.ebs_optimized_not_supported, element(each.value.instance_types, 0)))
+ ebs_optimized = lookup(each.value, "ebs_optimized", null)
instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
index 1aa8cfe26d..335ce48009 100644
--- a/modules/node_groups/variables.tf
+++ b/modules/node_groups/variables.tf
@@ -28,12 +28,6 @@ variable "default_iam_role_arn" {
default = ""
}
-variable "workers_group_defaults" {
- description = "Workers group defaults from parent"
- type = any
- default = {}
-}
-
variable "worker_security_group_id" {
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
type = string
@@ -63,9 +57,3 @@ variable "node_groups" {
type = any
default = {}
}
-
-variable "ebs_optimized_not_supported" {
- description = "List of instance types that do not support EBS optimization"
- type = list(string)
- default = []
-}
diff --git a/node_groups.tf b/node_groups.tf
index 531a3df480..f999fab1a5 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -8,8 +8,6 @@ module "node_groups" {
cluster_auth_base64 = local.cluster_auth_base64
default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
- ebs_optimized_not_supported = local.ebs_optimized_not_supported
- workers_group_defaults = local.workers_group_defaults
worker_security_group_id = local.worker_security_group_id
worker_additional_security_group_ids = var.worker_additional_security_group_ids
diff --git a/outputs.tf b/outputs.tf
index b924ee7533..754e7c5f97 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -105,15 +105,15 @@ output "workers_user_data" {
value = local.launch_template_userdata_rendered
}
-output "workers_default_ami_id" {
- description = "ID of the default worker group AMI"
- value = local.default_ami_id_linux
-}
-
-output "workers_default_ami_id_windows" {
- description = "ID of the default Windows worker group AMI"
- value = local.default_ami_id_windows
-}
+# output "workers_default_ami_id" {
+# description = "ID of the default worker group AMI"
+# value = local.default_ami_id_linux
+# }
+
+# output "workers_default_ami_id_windows" {
+# description = "ID of the default Windows worker group AMI"
+# value = local.default_ami_id_windows
+# }
output "workers_launch_template_ids" {
description = "IDs of the worker launch templates."
diff --git a/variables.tf b/variables.tf
index 1537c874cc..9c7571797a 100644
--- a/variables.tf
+++ b/variables.tf
@@ -127,14 +127,8 @@ variable "vpc_id" {
}
variable "worker_groups" {
- description = "A list of maps defining worker group configurations to be defined using AWS Launch Template. See workers_group_defaults for valid keys."
- type = any
- default = []
-}
-
-variable "workers_group_defaults" {
- description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys."
- type = any
+ description = "A map of maps defining worker group configurations to be defined using AWS Launch Template"
+ type = map(any)
default = {}
}
diff --git a/workers.tf b/workers.tf
index e221fb61bb..eadd5d18a7 100644
--- a/workers.tf
+++ b/workers.tf
@@ -1,40 +1,49 @@
+locals {
+ # Abstracted to a local so that it can be shared with node group as well
+ # Only values that are common between ASG and Node Group are pulled out into this local map
+ group_default = {
+ min_size = try(var.group_default.min_size, 1)
+ max_size = try(var.group_default.max_size, 3)
+ desired_capacity = try(var.group_default.desired_capacity, 1)
+ }
+}
resource "aws_autoscaling_group" "this" {
for_each = var.create_eks ? var.worker_groups : {}
name_prefix = "${join("-", [local.cluster_name, lookup(each.value, "name", each.key)])}-"
launch_template {
- name = each.value.launch_template_key
- version = lookup(each.value, "version", "$Latest")
+ name = each.value.launch_template_key # required
+ version = try(each.value.launch_template_version, var.group_default.launch_template_version, "$Latest")
}
- availability_zones = lookup(each.value, "availability_zones", null)
- vpc_zone_identifier = lookup(each.value, "vpc_zone_identifier", null)
+ availability_zones = try(each.value.availability_zones, var.group_default.availability_zones, null)
+ vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default.vpc_zone_identifier, null)
- max_size = each.value.max_size
- min_size = each.value.min_size
- desired_capacity = lookup(each.value, "desired_capacity", null)
- capacity_rebalance = lookup(each.value, "capacity_rebalance", null)
- default_cooldown = lookup(each.value, "default_cooldown", null)
- protect_from_scale_in = lookup(each.value, "protect_from_scale_in", null)
+ min_size = try(each.value.min_size, local.group_default.min_size)
+ max_size = try(each.value.max_size, local.group_default.max_size)
+ desired_capacity = try(each.value.desired_capacity, local.group_default.desired_capacity)
+ capacity_rebalance = try(each.value.capacity_rebalance, var.group_default.capacity_rebalance, null)
+ default_cooldown = try(each.value.default_cooldown, var.group_default.default_cooldown, null)
+ protect_from_scale_in = try(each.value.protect_from_scale_in, var.group_default.protect_from_scale_in, null)
- load_balancers = lookup(each.value, "load_balancers", null)
- target_group_arns = lookup(each.value, "target_group_arns", null)
- placement_group = lookup(each.value, "placement_group", null)
- health_check_type = lookup(each.value, "health_check_type", null)
- health_check_grace_period = lookup(each.value, "health_check_grace_period", null)
+ load_balancers = try(each.value.load_balancers, var.group_default.load_balancers, null)
+ target_group_arns = try(each.value.target_group_arns, var.group_default.target_group_arns, null)
+ placement_group = try(each.value.placement_group, var.group_default.placement_group, null)
+ health_check_type = try(each.value.health_check_type, var.group_default.health_check_type, null)
+ health_check_grace_period = try(each.value.health_check_grace_period, var.group_default.health_check_grace_period, null)
- force_delete = lookup(each.value, "force_delete", null)
- termination_policies = lookup(each.value, "termination_policies", null)
- suspended_processes = lookup(each.value, "suspended_processes", null)
- max_instance_lifetime = lookup(each.value, "max_instance_lifetime", null)
+ force_delete = try(each.value.force_delete, var.group_default.force_delete, false)
+ termination_policies = try(each.value.termination_policies, var.group_default.termination_policies, null)
+ suspended_processes = try(each.value.suspended_processes, var.group_default.suspended_processes, ["AZRebalance"])
+ max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default.max_instance_lifetime, null)
- enabled_metrics = lookup(each.value, "enabled_metrics", null)
- metrics_granularity = lookup(each.value, "metrics_granularity", null)
- service_linked_role_arn = lookup(each.value, "service_linked_role_arn", null)
+ enabled_metrics = try(each.value.enabled_metrics, var.group_default.enabled_metrics, null)
+ metrics_granularity = try(each.value.metrics_granularity, var.group_default.metrics_granularity, null)
+ service_linked_role_arn = try(each.value.service_linked_role_arn, var.group_default.service_linked_role_arn, null)
dynamic "initial_lifecycle_hook" {
- for_each = lookup(each.value, "initial_lifecycle_hook", null) != null ? each.value.initial_lifecycle_hook : []
+ for_each = try(each.value.initial_lifecycle_hook, var.group_default.initial_lifecycle_hook, {})
iterator = hook
content {
@@ -49,7 +58,7 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "instance_refresh" {
- for_each = lookup(each.value, "instance_refresh", null) != null ? each.value.instance_refresh : []
+ for_each = try(each.value.instance_refresh, var.group_default.instance_refresh, {})
iterator = refresh
content {
@@ -57,7 +66,7 @@ resource "aws_autoscaling_group" "this" {
triggers = lookup(refresh.value, "triggers", null)
dynamic "preferences" {
- for_each = lookup(refresh.value, "preferences", null) != null ? [refresh.value.preferences] : []
+ for_each = try(refresh.value.preferences, [])
content {
instance_warmup = lookup(preferences.value, "instance_warmup", null)
min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
@@ -67,12 +76,12 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "mixed_instances_policy" {
- for_each = lookup(each.value, "mixed_instances_policy", null) != null ? each.value.mixed_instances_policy : []
+ for_each = try(each.value.mixed_instances_policy, var.group_default.mixed_instances_policy, {})
iterator = mixed
content {
dynamic "instances_distribution" {
- for_each = lookup(mixed.value, "instances_distribution", null) != null ? [mixed.value.instances_distribution] : []
+ for_each = try(mixed.value.instances_distribution, {})
iterator = distro
content {
@@ -92,13 +101,13 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "override" {
- for_each = lookup(mixed.value, "override", null) != null ? mixed.value.override : []
+ for_each = try(mixed.value.override, {})
content {
instance_type = lookup(override.value, "instance_type", null)
weighted_capacity = lookup(override.value, "weighted_capacity", null)
dynamic "launch_template_specification" {
- for_each = lookup(override.value, "launch_template_specification", null) != null ? override.value.launch_template_specification : []
+ for_each = try(override.value.launch_template_specification, {})
content {
launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null)
}
@@ -110,7 +119,7 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "warm_pool" {
- for_each = lookup(each.value, "warm_pool", null) != null ? each.value.warm_pool : []
+ for_each = try(each.value.warm_pool, var.group_default.warm_pool, {})
content {
pool_state = lookup(warm_pool.value, "pool_state", null)
@@ -166,7 +175,7 @@ resource "aws_launch_template" "this" {
ebs_optimized = lookup(template.value, "ebs_optimized", null)
image_id = lookup(template.value, "image_id", null)
- instance_type = lookup(template.value, "instance_type", null)
+ instance_type = lookup(template.value, "instance_type", "m6i.large")
key_name = lookup(template.value, "key_name", null)
user_data = lookup(template.value, "user_data", null)
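
The `try()` chains used throughout this refactor resolve each setting with a fixed precedence: the per-group value wins, then the shared defaults map, then a hard-coded fallback. A minimal, self-contained sketch of that pattern (the group names and values here are illustrative only, not part of the module):

```hcl
variable "worker_groups" {
  type = any
  default = {
    one = { max_size = 5 } # overrides the shared default
    two = {}               # inherits every default
  }
}

locals {
  group_default = {
    min_size = 1
    max_size = 3
  }

  # Precedence: per-group value, then shared default, then hard fallback
  resolved = { for k, v in var.worker_groups : k => {
    min_size = try(v.min_size, local.group_default.min_size, 1)
    max_size = try(v.max_size, local.group_default.max_size, 3)
  } }
}

output "resolved" {
  # one => { min_size = 1, max_size = 5 }, two => { min_size = 1, max_size = 3 }
  value = local.resolved
}
```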
From d23d98084984c101e036f51dd9fb96626d260a5f Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sun, 7 Nov 2021 12:11:06 -0500
Subject: [PATCH 04/83] refactor: re-organize files, merge small files into
existing files
---
.../CHANGELOG.pre-v11.0.0.md | 0
CHANGELOG.md | 2 +
fargate.tf | 16 -----
irsa.tf | 23 ------
kubectl.tf | 8 ---
main.tf | 72 ++++++++++++++++---
node_groups.tf | 25 -------
workers.tf | 55 ++++++++++++++
8 files changed, 119 insertions(+), 82 deletions(-)
rename CHANGELOG.pre-v11.0.0.md => .github/CHANGELOG.pre-v11.0.0.md (100%)
delete mode 100644 fargate.tf
delete mode 100644 irsa.tf
delete mode 100644 kubectl.tf
delete mode 100644 node_groups.tf
diff --git a/CHANGELOG.pre-v11.0.0.md b/.github/CHANGELOG.pre-v11.0.0.md
similarity index 100%
rename from CHANGELOG.pre-v11.0.0.md
rename to .github/CHANGELOG.pre-v11.0.0.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fe6345f9b5..93228a9cd8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,8 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
project adheres to [Semantic Versioning](http://semver.org/).
+For changes prior to v11.0.0, see [CHANGELOG.pre-v11.0.0.md](.github/CHANGELOG.pre-v11.0.0.md)
+
## [Unreleased]
diff --git a/fargate.tf b/fargate.tf
deleted file mode 100644
index 5526e2eed0..0000000000
--- a/fargate.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-module "fargate" {
- source = "./modules/fargate"
-
- create_eks = var.create_eks
- create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
-
- cluster_name = local.cluster_name
- fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
- permissions_boundary = var.permissions_boundary
- iam_path = var.iam_path
- subnets = coalescelist(var.fargate_subnets, var.subnets, [""])
-
- fargate_profiles = var.fargate_profiles
-
- tags = var.tags
-}
diff --git a/irsa.tf b/irsa.tf
deleted file mode 100644
index 5fc3dc8df7..0000000000
--- a/irsa.tf
+++ /dev/null
@@ -1,23 +0,0 @@
-# Enable IAM Roles for EKS Service-Accounts (IRSA).
-
-# The Root CA Thumbprint for an OpenID Connect Identity Provider is currently
-# Being passed as a default value which is the same for all regions and
-# Is valid until (Jun 28 17:39:16 2034 GMT).
-# https://crt.sh/?q=9E99A48A9960B14926BB7F3B02E22DA2B0AB7280
-# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
-# https://github.com/terraform-providers/terraform-provider-aws/issues/10104
-
-resource "aws_iam_openid_connect_provider" "oidc_provider" {
- count = var.enable_irsa && var.create_eks ? 1 : 0
-
- client_id_list = local.client_id_list
- thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
- url = local.cluster_oidc_issuer_url
-
- tags = merge(
- {
- Name = "${var.cluster_name}-eks-irsa"
- },
- var.tags
- )
-}
diff --git a/kubectl.tf b/kubectl.tf
deleted file mode 100644
index b5d6947855..0000000000
--- a/kubectl.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-resource "local_file" "kubeconfig" {
- count = var.write_kubeconfig && var.create_eks ? 1 : 0
-
- content = local.kubeconfig
- filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
- file_permission = var.kubeconfig_file_permission
- directory_permission = "0755"
-}
diff --git a/main.tf b/main.tf
index d1860f0696..9500141bc8 100644
--- a/main.tf
+++ b/main.tf
@@ -1,13 +1,6 @@
-resource "aws_cloudwatch_log_group" "this" {
- count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
-
- name = "/aws/eks/${var.cluster_name}/cluster"
- retention_in_days = var.cluster_log_retention_in_days
- kms_key_id = var.cluster_log_kms_key_id
-
- tags = var.tags
-}
-
+################################################################################
+# Cluster
+################################################################################
resource "aws_eks_cluster" "this" {
count = var.create_eks ? 1 : 0
@@ -60,6 +53,20 @@ resource "aws_eks_cluster" "this" {
]
}
+resource "aws_cloudwatch_log_group" "this" {
+ count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
+
+ name = "/aws/eks/${var.cluster_name}/cluster"
+ retention_in_days = var.cluster_log_retention_in_days
+ kms_key_id = var.cluster_log_kms_key_id
+
+ tags = var.tags
+}
+
+################################################################################
+# Security Group
+################################################################################
+
resource "aws_security_group" "cluster" {
count = var.cluster_create_security_group && var.create_eks ? 1 : 0
@@ -125,6 +132,51 @@ resource "aws_security_group_rule" "cluster_private_access_sg_source" {
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
}
+################################################################################
+# Kubeconfig
+################################################################################
+
+resource "local_file" "kubeconfig" {
+ count = var.write_kubeconfig && var.create_eks ? 1 : 0
+
+ content = local.kubeconfig
+ filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
+ file_permission = var.kubeconfig_file_permission
+ directory_permission = "0755"
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+# Enable IAM Roles for EKS Service-Accounts (IRSA).
+# The Root CA Thumbprint for an OpenID Connect Identity Provider is currently
+# Being passed as a default value which is the same for all regions and
+# Is valid until (Jun 28 17:39:16 2034 GMT).
+# https://crt.sh/?q=9E99A48A9960B14926BB7F3B02E22DA2B0AB7280
+# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
+# https://github.com/terraform-providers/terraform-provider-aws/issues/10104
+
+# TODO - update to use TLS data source and drop hacks
+resource "aws_iam_openid_connect_provider" "oidc_provider" {
+ count = var.enable_irsa && var.create_eks ? 1 : 0
+
+ client_id_list = local.client_id_list
+ thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
+ url = local.cluster_oidc_issuer_url
+
+ tags = merge(
+ {
+ Name = "${var.cluster_name}-eks-irsa"
+ },
+ var.tags
+ )
+}
+
+################################################################################
+# IAM Roles & Policies
+################################################################################
+
resource "aws_iam_role" "cluster" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
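
The `TODO` above refers to fetching the OIDC issuer's root CA thumbprint dynamically instead of passing it in as a default value. One possible sketch, using the hashicorp/tls provider's `tls_certificate` data source (this is an assumption, not part of the patch, and the count guards are omitted for brevity):

```hcl
# Fetch the certificate chain of the cluster's OIDC issuer and use the
# root CA's SHA-1 fingerprint as the provider thumbprint
data "tls_certificate" "cluster" {
  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "oidc_provider" {
  client_id_list  = local.client_id_list
  thumbprint_list = [data.tls_certificate.cluster.certificates[0].sha1_fingerprint]
  url             = local.cluster_oidc_issuer_url

  tags = var.tags
}
```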
diff --git a/node_groups.tf b/node_groups.tf
deleted file mode 100644
index f999fab1a5..0000000000
--- a/node_groups.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-module "node_groups" {
- source = "./modules/node_groups"
-
- create_eks = var.create_eks
-
- cluster_name = local.cluster_name
- cluster_endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
-
- default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
- worker_security_group_id = local.worker_security_group_id
- worker_additional_security_group_ids = var.worker_additional_security_group_ids
-
- node_groups_defaults = var.node_groups_defaults
- node_groups = var.node_groups
-
- tags = var.tags
-
- depends_on = [
- aws_eks_cluster.this,
- aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
- aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
- aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly
- ]
-}
diff --git a/workers.tf b/workers.tf
index eadd5d18a7..529f574052 100644
--- a/workers.tf
+++ b/workers.tf
@@ -7,6 +7,61 @@ locals {
desired_capacity = try(var.group_default.desired_capacity, 1)
}
}
+
+################################################################################
+# Fargate
+################################################################################
+
+module "fargate" {
+ source = "./modules/fargate"
+
+ create_eks = var.create_eks
+ create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
+
+ cluster_name = local.cluster_name
+ fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
+ permissions_boundary = var.permissions_boundary
+ iam_path = var.iam_path
+ subnets = coalescelist(var.fargate_subnets, var.subnets, [""])
+
+ fargate_profiles = var.fargate_profiles
+
+ tags = var.tags
+}
+
+################################################################################
+# Node Groups
+################################################################################
+
+module "node_groups" {
+ source = "./modules/node_groups"
+
+ create_eks = var.create_eks
+
+ cluster_name = local.cluster_name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
+ worker_security_group_id = local.worker_security_group_id
+ worker_additional_security_group_ids = var.worker_additional_security_group_ids
+
+ node_groups_defaults = var.node_groups_defaults
+ node_groups = var.node_groups
+
+ tags = var.tags
+
+ depends_on = [
+ aws_eks_cluster.this,
+ aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
+ aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
+ aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly
+ ]
+}
+
+################################################################################
+# Autoscaling Group
+################################################################################
resource "aws_autoscaling_group" "this" {
for_each = var.create_eks ? var.worker_groups : {}
From 355d9a54febf78ae2c4406fb84f6f53c9d3c007b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sun, 7 Nov 2021 14:02:16 -0500
Subject: [PATCH 05/83] chore: stashing updates as I chew through this
---
README.md | 11 +-
UPGRADE-18.0.md | 89 ++++++++++
data.tf | 2 +-
docs/enable-docker-bridge-network.md | 6 +-
docs/faq.md | 22 +--
docs/spot-instances.md | 38 +++--
examples/bottlerocket/main.tf | 14 +-
examples/complete/main.tf | 8 +-
examples/instance_refresh/main.tf | 6 +-
examples/irsa/main.tf | 7 +-
examples/launch_templates/main.tf | 18 +-
examples/secrets_encryption/main.tf | 6 +-
locals.tf | 37 ++--
modules/node_groups/README.md | 1 +
modules/node_groups/locals.tf | 48 +++---
modules/node_groups/variables.tf | 6 +
variables.tf | 18 ++
workers.tf | 246 +++++++++++++--------------
18 files changed, 348 insertions(+), 235 deletions(-)
create mode 100644 UPGRADE-18.0.md
diff --git a/README.md b/README.md
index b98cd14ba5..ac779eb7db 100644
--- a/README.md
+++ b/README.md
@@ -48,12 +48,12 @@ module "eks" {
vpc_id = "vpc-1234556abcdef"
subnets = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
- worker_groups = [
- {
+ worker_groups = {
+ one = {
instance_type = "m4.large"
asg_max_size = 5
}
- ]
+ }
}
```
@@ -95,7 +95,7 @@ There are detailed examples available for you to see how certain features of thi
- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) - Create EKS Cluster with all available workers types in various combinations with many of supported features.
- [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket) - Create EKS cluster using [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html).
- [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) - Create EKS cluster with [Fargate profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) and attach Fargate profiles to an existing EKS cluster.
-
+
## Contributing
Report issues/questions/feature requests on in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section.
@@ -223,6 +223,8 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details | `any` | `{}` | no |
| [fargate\_subnets](#input\_fargate\_subnets) | A list of subnets to place fargate workers within (if different from subnets). | `list(string)` | `[]` | no |
+| [group\_default\_settings](#input\_group\_default\_settings) | Override default values for autoscaling group, node group settings | `any` | `{}` | no |
+| [iam\_instance\_profiles](#input\_iam\_instance\_profiles) | Map of instance profile definitions to create | `map(any)` | `{}` | no |
| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path. | `string` | `"/"` | no |
| [kubeconfig\_api\_version](#input\_kubeconfig\_api\_version) | KubeConfig API version. Defaults to client.authentication.k8s.io/v1alpha1 | `string` | `"client.authentication.k8s.io/v1alpha1"` | no |
| [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"]. | `list(string)` | `[]` | no |
@@ -232,6 +234,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [kubeconfig\_file\_permission](#input\_kubeconfig\_file\_permission) | File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.` | `string` | `"0600"` | no |
| [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig. | `string` | `""` | no |
| [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`. | `string` | `"./"` | no |
+| [launch\_templates](#input\_launch\_templates) | Map of launch template definitions to create | `map(any)` | `{}` | no |
| [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file. | `bool` | `true` | no |
| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, cluster\_iam\_role\_name must be specified. | `bool` | `true` | no |
| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no |
diff --git a/UPGRADE-18.0.md b/UPGRADE-18.0.md
new file mode 100644
index 0000000000..445959bcab
--- /dev/null
+++ b/UPGRADE-18.0.md
@@ -0,0 +1,89 @@
+# Upgrade from v17.x to v18.x
+
+If you have any questions regarding this upgrade process, please consult the `examples` directory:
+
+- TODO
+
+If you find a bug, please open an issue with supporting configuration to reproduce.
+
+## Changes
+
+- Launch configuration support has been removed and only launch template is supported going forward. AWS is no longer adding new features back into launch configuration and their docs state [`We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. We provide information about launch configurations for customers who have not yet migrated from launch configurations to launch templates.`](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
+- One IAM role/profile is created as a "default" role (if users opt in to create the role/profile). Otherwise users need to supply the instance profile name/arn to use for the various groups
+- Maps, maps, maps, maps...
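+
+As a sketch of the "maps" point (group names and values here are illustrative only), worker group definitions move from a list of maps to a map of maps, so each group is addressed by a stable key rather than a list index:
+
+```hcl
+# v17.x: list of maps, addressed by index
+worker_groups = [
+  {
+    name          = "one"
+    instance_type = "m4.large"
+  },
+]
+
+# v18.x: map of maps, addressed by key
+worker_groups = {
+  one = {
+    instance_type = "m4.large"
+  }
+}
+```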
+
+## List of backwards incompatible changes
+
+- TODO
+
+### Variable and output changes
+
+1. Removed variables:
+
+ - TODO
+
+2. Renamed variables:
+
+ - TODO
+
+3. Added variables:
+
+ - TODO
+
+4. Removed outputs:
+
+ - TODO
+
+5. Renamed outputs:
+
+ - TODO
+
+6. Added outputs:
+
+ - TODO
+
+## Upgrade Migrations
+
+### Before 17.x Example
+
+```hcl
+module "cluster_before" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 17.0"
+
+ # TODO
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+```
+
+### After 18.x Example
+
+```hcl
+module "cluster_after" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.0"
+
+ # TODO
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+```
+
+### State Changes
+
+To migrate from the `v17.x` version to `v18.x` version example shown above, the following state move commands can be performed to maintain the current resources without modification:
+
+```bash
+terraform state mv 'from' 'to'
+```
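+
+As a purely hypothetical illustration (real addresses depend on your configuration and on the final v18 resource names), moving a list-indexed worker group ASG to its new map-keyed address might look like:
+
+```bash
+terraform state mv \
+  'module.eks.aws_autoscaling_group.workers[0]' \
+  'module.eks.aws_autoscaling_group.this["one"]'
+```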
+
+### Configuration Changes
+
+TODO
diff --git a/data.tf b/data.tf
index 3edd0149b8..07c0b92f6f 100644
--- a/data.tf
+++ b/data.tf
@@ -70,7 +70,7 @@ data "aws_iam_role" "custom_cluster_iam_role" {
}
data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
- count = var.manage_worker_iam_resources ? 0 : local.worker_group_count
+ count = var.manage_worker_iam_resources ? 0 : length(var.worker_groups)
name = lookup(
var.worker_groups[count.index],
diff --git a/docs/enable-docker-bridge-network.md b/docs/enable-docker-bridge-network.md
index f6eb8ee11e..df5e20f629 100644
--- a/docs/enable-docker-bridge-network.md
+++ b/docs/enable-docker-bridge-network.md
@@ -4,12 +4,12 @@ The latest versions of the AWS EKS-optimized AMI disable the docker bridge netwo
```hcl
locals {
- worker_groups = [
- {
+ worker_groups = {
+ one = {
# Other parameters omitted for brevity
bootstrap_extra_args = "--enable-docker-bridge true"
}
- ]
+ }
}
```
diff --git a/docs/faq.md b/docs/faq.md
index d1a5d0f4bc..1550c282ee 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -146,34 +146,34 @@ Amazon EKS clusters must contain one or more Linux worker nodes to run core syst
1. Build the AWS EKS cluster with the following workers configuration (default Linux):
```hcl
-worker_groups = [
- {
+ worker_groups = {
+ one = {
name = "worker-group-linux"
instance_type = "m5.large"
platform = "linux"
asg_desired_capacity = 2
},
- ]
+ }
```
2. Apply commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use tab with name `Windows`)
3. Add one more worker group for Windows with required field `platform = "windows"` and update your cluster. Worker group example:
```hcl
-worker_groups = [
- {
+ worker_groups = {
+ linux = {
name = "worker-group-linux"
instance_type = "m5.large"
platform = "linux"
asg_desired_capacity = 2
},
- {
+ windows = {
name = "worker-group-windows"
instance_type = "m5.large"
platform = "windows"
asg_desired_capacity = 1
},
- ]
+ }
```
4. With `kubectl get nodes` you can see cluster with mixed (Linux/Windows) nodes support.
@@ -217,12 +217,12 @@ module "eks" {
subnets = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
}
- worker_groups = [
- {
+ worker_groups = {
+ one = {
instance_type = "m4.large"
asg_max_size = 5
},
- {
+ two = {
name = "worker-group-2"
subnets = ["subnet-qwer123"]
instance_type = "t3.medium"
@@ -230,6 +230,6 @@ module "eks" {
public_ip = true
ebs_optimized = true
}
- ]
+ }
}
```
\ No newline at end of file
diff --git a/docs/spot-instances.md b/docs/spot-instances.md
index 2631c0bdc2..739d2d923b 100644
--- a/docs/spot-instances.md
+++ b/docs/spot-instances.md
@@ -27,8 +27,8 @@ Notes:
Only Launch Template is supported in this module; Launch Configuration support has been deprecated and removed:
```hcl
- worker_groups = [
- {
+ worker_groups = {
+    one = {
name = "spot-1"
override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
spot_instance_pools = 4
@@ -37,7 +37,7 @@ Only Launch Template is supported in this module; Launch Configuration support h
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
public_ip = true
},
- ]
+ }
```
## Using Launch Templates With Both Spot and On Demand
@@ -47,21 +47,23 @@ Example launch template to launch 2 on demand instances of type m5.large, and ha
`on_demand_percentage_above_base_capacity` is set to 25 so 1 in 4 new nodes, when auto-scaling, will be on-demand instances. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).
```hcl
- worker_groups = [{
- name = "mixed-demand-spot"
- override_instance_types = ["m5.large", "m5a.large", "m4.large"]
- root_encrypted = true
- root_volume_size = 50
-
- asg_min_size = 2
- asg_desired_capacity = 2
- on_demand_base_capacity = 3
- on_demand_percentage_above_base_capacity = 25
- asg_max_size = 20
- spot_instance_pools = 3
-
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`"
- }]
+ worker_groups = {
+ one = {
+ name = "mixed-demand-spot"
+ override_instance_types = ["m5.large", "m5a.large", "m4.large"]
+ root_encrypted = true
+ root_volume_size = 50
+
+ asg_min_size = 2
+ asg_desired_capacity = 2
+ on_demand_base_capacity = 3
+ on_demand_percentage_above_base_capacity = 25
+ asg_max_size = 20
+ spot_instance_pools = 3
+
+ kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`"
+ }
+ }
```
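
To make the split concrete (a worked example, not taken from the module docs): with `on_demand_base_capacity = 3` and `on_demand_percentage_above_base_capacity = 25`, the first 3 instances are on-demand, and 25% of any capacity above the base is on-demand. At 11 instances total, that is 3 base + 25% of the remaining 8 = 2, so 5 on-demand and 6 Spot instances.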
## Important Notes
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index b6fe967da8..4edc3268ac 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -28,8 +28,8 @@ module "eks" {
write_kubeconfig = false
manage_aws_auth = true
- worker_groups = [
- {
+ worker_groups = {
+ one = {
name = "bottlerocket-nodes"
ami_id = data.aws_ami.bottlerocket_ami.id
instance_type = "t3a.small"
@@ -52,12 +52,12 @@ module "eks" {
aws_region = data.aws_region.current.name
}
# example of k8s/kubelet configuration via additional_userdata
-      additional_userdata = <<EOT
| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
+| [node\_default\_settings](#input\_node\_default\_settings) | Node group defaults from parent | `any` | `{}` | no |
| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
index 0a6c7cbffb..178eb9b1c7 100644
--- a/modules/node_groups/locals.tf
+++ b/modules/node_groups/locals.tf
@@ -2,37 +2,37 @@ locals {
# Merge defaults and per-group values to make code cleaner
node_groups_expanded = { for k, v in var.node_groups : k => merge(
{
- desired_capacity = var.workers_group_defaults["asg_desired_capacity"]
+ desired_capacity = var.node_default_settings["asg_desired_capacity"]
iam_role_arn = var.default_iam_role_arn
- instance_types = [var.workers_group_defaults["instance_type"]]
- key_name = var.workers_group_defaults["key_name"]
- launch_template_id = var.workers_group_defaults["launch_template_id"]
- launch_template_version = var.workers_group_defaults["launch_template_version"]
+ instance_types = [var.node_default_settings["instance_type"]]
+ key_name = var.node_default_settings["key_name"]
+ launch_template_id = var.node_default_settings["launch_template_id"]
+ launch_template_version = var.node_default_settings["launch_template_version"]
set_instance_types_on_lt = false
- max_capacity = var.workers_group_defaults["asg_max_size"]
- min_capacity = var.workers_group_defaults["asg_min_size"]
- subnets = var.workers_group_defaults["subnets"]
+ max_capacity = var.node_default_settings["asg_max_size"]
+ min_capacity = var.node_default_settings["asg_min_size"]
+ subnets = var.node_default_settings["subnets"]
create_launch_template = false
bootstrap_env = {}
- kubelet_extra_args = var.workers_group_defaults["kubelet_extra_args"]
- disk_size = var.workers_group_defaults["root_volume_size"]
- disk_type = var.workers_group_defaults["root_volume_type"]
- disk_iops = var.workers_group_defaults["root_iops"]
- disk_throughput = var.workers_group_defaults["root_volume_throughput"]
- disk_encrypted = var.workers_group_defaults["root_encrypted"]
- disk_kms_key_id = var.workers_group_defaults["root_kms_key_id"]
- enable_monitoring = var.workers_group_defaults["enable_monitoring"]
- eni_delete = var.workers_group_defaults["eni_delete"]
- public_ip = var.workers_group_defaults["public_ip"]
- pre_userdata = var.workers_group_defaults["pre_userdata"]
- additional_security_group_ids = var.workers_group_defaults["additional_security_group_ids"]
+ kubelet_extra_args = var.node_default_settings["kubelet_extra_args"]
+ disk_size = var.node_default_settings["root_volume_size"]
+ disk_type = var.node_default_settings["root_volume_type"]
+ disk_iops = var.node_default_settings["root_iops"]
+ disk_throughput = var.node_default_settings["root_volume_throughput"]
+ disk_encrypted = var.node_default_settings["root_encrypted"]
+ disk_kms_key_id = var.node_default_settings["root_kms_key_id"]
+ enable_monitoring = var.node_default_settings["enable_monitoring"]
+ eni_delete = var.node_default_settings["eni_delete"]
+ public_ip = var.node_default_settings["public_ip"]
+ pre_userdata = var.node_default_settings["pre_userdata"]
+ additional_security_group_ids = var.node_default_settings["additional_security_group_ids"]
taints = []
- timeouts = var.workers_group_defaults["timeouts"]
+ timeouts = var.node_default_settings["timeouts"]
update_default_version = true
ebs_optimized = null
- metadata_http_endpoint = var.workers_group_defaults["metadata_http_endpoint"]
- metadata_http_tokens = var.workers_group_defaults["metadata_http_tokens"]
- metadata_http_put_response_hop_limit = var.workers_group_defaults["metadata_http_put_response_hop_limit"]
+ metadata_http_endpoint = var.node_default_settings["metadata_http_endpoint"]
+ metadata_http_tokens = var.node_default_settings["metadata_http_tokens"]
+ metadata_http_put_response_hop_limit = var.node_default_settings["metadata_http_put_response_hop_limit"]
ami_is_eks_optimized = true
},
var.node_groups_defaults,
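
The merge above layers three levels of configuration: the hard-coded defaults in this local, then `var.node_groups_defaults`, then each entry of `var.node_groups`, with later arguments to `merge()` winning. A minimal sketch of that precedence (all values hypothetical):

```hcl
locals {
  built_in       = { disk_size = 50, instance_types = ["m6i.large"] } # module defaults
  group_defaults = { disk_size = 100 }                                # var.node_groups_defaults
  per_group      = { instance_types = ["c6i.xlarge"] }                # one var.node_groups entry

  # merge() gives precedence to later arguments
  effective = merge(local.built_in, local.group_defaults, local.per_group)
  # => { disk_size = 100, instance_types = ["c6i.xlarge"] }
}
```
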
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
index 335ce48009..7c24706420 100644
--- a/modules/node_groups/variables.tf
+++ b/modules/node_groups/variables.tf
@@ -28,6 +28,12 @@ variable "default_iam_role_arn" {
default = ""
}
+variable "node_default_settings" {
+ description = "Node group defaults from parent"
+ type = any
+ default = {}
+}
+
variable "worker_security_group_id" {
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
type = string
diff --git a/variables.tf b/variables.tf
index 9c7571797a..14b355479e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -96,6 +96,18 @@ variable "map_users" {
default = []
}
+variable "launch_templates" {
+ description = "Map of launch template definitions to create"
+ type = map(any)
+ default = {}
+}
+
+variable "iam_instance_profiles" {
+ description = "Map of instance profile definitions to create"
+ type = map(any)
+ default = {}
+}
+
variable "fargate_subnets" {
description = "A list of subnets to place fargate workers within (if different from subnets)."
type = list(string)
@@ -132,6 +144,12 @@ variable "worker_groups" {
default = {}
}
+variable "group_default_settings" {
+  description = "Override default values for autoscaling group and node group settings"
+ type = any
+ default = {}
+}
+
variable "worker_security_group_id" {
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
type = string
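
Because `group_default_settings` is typed `any`, callers supply only the keys they want to override; anything else falls through the `try()` chains in `workers.tf`. A hypothetical caller configuration (module source, IDs, and versions are placeholders):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # placeholder source

  cluster_name    = "example"
  cluster_version = "1.21"
  vpc_id          = "vpc-0123456789abcdef0"                # placeholder
  subnets         = ["subnet-0123abcd", "subnet-4567efgh"] # placeholders

  # Only the keys being overridden need to be supplied
  group_default_settings = {
    min_size         = 2
    max_size         = 5
    desired_capacity = 2
  }
}
```
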
diff --git a/workers.tf b/workers.tf
index 529f574052..aaa8e7da7d 100644
--- a/workers.tf
+++ b/workers.tf
@@ -1,10 +1,10 @@
locals {
# Abstracted to a local so that it can be shared with node group as well
  # Only values that are common between ASG and Node Group are pulled out to this local map
- group_default = {
- min_size = try(var.group_default.min_size, 1)
- max_size = try(var.group_default.max_size, 3)
- desired_capacity = try(var.group_default.desired_capacity, 1)
+ group_default_settings = {
+ min_size = try(var.group_default_settings.min_size, 1)
+ max_size = try(var.group_default_settings.max_size, 3)
+ desired_capacity = try(var.group_default_settings.desired_capacity, 1)
}
}
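
The `try()` chains used throughout this file fall through their arguments in order, so a per-group value wins over the shared default, which wins over the literal fallback. A quick illustration of that behavior (names hypothetical):

```hcl
locals {
  group = { min_size = 2 } # hypothetical worker group entry

  min_size = try(local.group.min_size, 1) # => 2, the attribute exists
  max_size = try(local.group.max_size, 3) # => 3, the missing attribute raises an
                                          #    error that try() swallows
}
```
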
@@ -46,8 +46,8 @@ module "node_groups" {
worker_security_group_id = local.worker_security_group_id
worker_additional_security_group_ids = var.worker_additional_security_group_ids
- node_groups_defaults = var.node_groups_defaults
- node_groups = var.node_groups
+ node_default_settings = var.group_default_settings
+ node_groups = var.node_groups
tags = var.tags
@@ -62,43 +62,44 @@ module "node_groups" {
################################################################################
# Autoscaling Group
################################################################################
+
resource "aws_autoscaling_group" "this" {
for_each = var.create_eks ? var.worker_groups : {}
- name_prefix = "${join("-", [local.cluster_name, lookup(each.value, "name", each.key)])}-"
+ name_prefix = "${join("-", [local.cluster_name, try(each.value.name, each.key)])}-"
launch_template {
name = each.value.launch_template_key # required
- version = try(each.value.launch_template_version, var.group_default.min_size, "$Latest")
+    version = try(each.value.launch_template_version, var.group_default_settings.launch_template_version, "$Latest")
}
- availability_zones = try(each.value.availability_zones, var.group_default.availability_zones, null)
- vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default.vpc_zone_identifier, null)
+ availability_zones = try(each.value.availability_zones, var.group_default_settings.availability_zones, null)
+ vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default_settings.vpc_zone_identifier, null)
- min_size = try(each.value.min_size, locals.wg_default.min_size)
- max_size = try(each.value.max_size, locals.wg_default.max_size)
- desired_capacity = try(each.value.desired_capacity, locals.wg_default.desired_capacity)
- capacity_rebalance = try(each.value.capacity_rebalance, var.group_default.capacity_rebalance, null)
- default_cooldown = try(each.value.default_cooldown, var.group_default.default_cooldown, null)
- protect_from_scale_in = try(each.value.protect_from_scale_in, var.group_default.protect_from_scale_in, null)
+ min_size = try(each.value.min_size, local.group_default_settings.min_size)
+ max_size = try(each.value.max_size, local.group_default_settings.max_size)
+ desired_capacity = try(each.value.desired_capacity, local.group_default_settings.desired_capacity)
+ capacity_rebalance = try(each.value.capacity_rebalance, var.group_default_settings.capacity_rebalance, null)
+ default_cooldown = try(each.value.default_cooldown, var.group_default_settings.default_cooldown, null)
+ protect_from_scale_in = try(each.value.protect_from_scale_in, var.group_default_settings.protect_from_scale_in, null)
- load_balancers = try(each.value.load_balancers, var.group_default.load_balancers, null)
- target_group_arns = try(each.value.target_group_arns, var.group_default.target_group_arns, null)
- placement_group = try(each.value.placement_group, var.group_default.placement_group, null)
- health_check_type = try(each.value.health_check_type, var.group_default.health_check_type, null)
- health_check_grace_period = try(each.value.health_check_grace_period, var.group_default.health_check_grace_period, null)
+ load_balancers = try(each.value.load_balancers, var.group_default_settings.load_balancers, null)
+ target_group_arns = try(each.value.target_group_arns, var.group_default_settings.target_group_arns, null)
+ placement_group = try(each.value.placement_group, var.group_default_settings.placement_group, null)
+ health_check_type = try(each.value.health_check_type, var.group_default_settings.health_check_type, null)
+ health_check_grace_period = try(each.value.health_check_grace_period, var.group_default_settings.health_check_grace_period, null)
- force_delete = try(each.value.force_delete, var.group_default.force_delete, false)
- termination_policies = try(each.value.termination_policies, var.group_default.termination_policies, null)
- suspended_processes = try(each.value.suspended_processes, var.group_default.suspended_processes, "AZRebalance")
- max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default.max_instance_lifetime, null)
+ force_delete = try(each.value.force_delete, var.group_default_settings.force_delete, false)
+ termination_policies = try(each.value.termination_policies, var.group_default_settings.termination_policies, null)
+  suspended_processes       = try(each.value.suspended_processes, var.group_default_settings.suspended_processes, ["AZRebalance"])
+ max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default_settings.max_instance_lifetime, null)
- enabled_metrics = try(each.value.enabled_metrics, var.group_default.enabled_metrics, null)
- metrics_granularity = try(each.value.metrics_granularity, var.group_default.metrics_granularity, null)
- service_linked_role_arn = try(each.value.service_linked_role_arn, var.group_default.service_linked_role_arn, null)
+ enabled_metrics = try(each.value.enabled_metrics, var.group_default_settings.enabled_metrics, null)
+ metrics_granularity = try(each.value.metrics_granularity, var.group_default_settings.metrics_granularity, null)
+ service_linked_role_arn = try(each.value.service_linked_role_arn, var.group_default_settings.service_linked_role_arn, null)
dynamic "initial_lifecycle_hook" {
- for_each = try(each.value.initial_lifecycle_hook, var.group_default.initial_lifecycle_hook, {})
+ for_each = try(each.value.initial_lifecycle_hook, var.group_default_settings.initial_lifecycle_hook, {})
iterator = hook
content {
@@ -113,7 +114,7 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "instance_refresh" {
- for_each = try(each.value.instance_refresh, var.group_default.instance_refresh, {})
+ for_each = try(each.value.instance_refresh, var.group_default_settings.instance_refresh, {})
iterator = refresh
content {
@@ -131,13 +132,13 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "mixed_instances_policy" {
- for_each = try(each.value.mixed_instances_policy, var.group_default.mixed_instances_policy, {})
+ for_each = try(each.value.mixed_instances_policy, var.group_default_settings.mixed_instances_policy, {})
iterator = mixed
content {
dynamic "instances_distribution" {
for_each = try(mixed.value.instances_distribution, {})
- iterater = distro
+ iterator = distro
content {
on_demand_allocation_strategy = lookup(distro.value, "on_demand_allocation_strategy", null)
@@ -174,7 +175,7 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "warm_pool" {
- for_each = try(each.value.warm_pool, var.group_default.warm_pool, {})
+ for_each = try(each.value.warm_pool, var.group_default_settings.warm_pool, {})
content {
pool_state = lookup(warm_pool.value, "pool_state", null)
@@ -223,28 +224,27 @@ resource "aws_autoscaling_group" "this" {
resource "aws_launch_template" "this" {
for_each = var.create_eks ? var.launch_templates : {}
- iterater = template
- name_prefix = "${local.cluster_name}-${lookup(template.value, "name", each.key)}"
- description = lookup(template.value, "description", null)
+ name_prefix = "${local.cluster_name}-${try(each.value.name, each.key)}"
+ description = try(each.value.description, var.group_default_settings.description, null)
- ebs_optimized = lookup(template.value, "ebs_optimized", null)
- image_id = lookup(template.value, "image_id", null)
- instance_type = lookup(template.value, "instance_type", "m6i.large")
- key_name = lookup(template.value, "key_name", null)
- user_data = lookup(template.value, "user_data", null)
+ ebs_optimized = try(each.value.ebs_optimized, var.group_default_settings.ebs_optimized, null)
+ image_id = try(each.value.image_id, var.group_default_settings.image_id, null)
+ instance_type = try(each.value.instance_type, var.group_default_settings.instance_type, "m6i.large")
+ key_name = try(each.value.key_name, var.group_default_settings.key_name, null)
+ user_data = try(each.value.user_data, var.group_default_settings.user_data, null)
- vpc_security_group_ids = lookup(template.value, "vpc_security_group_ids", null)
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.group_default_settings.vpc_security_group_ids, null)
- default_version = lookup(template.value, "default_version", null)
- update_default_version = lookup(template.value, "update_default_version", null)
- disable_api_termination = lookup(template.value, "disable_api_termination", null)
- instance_initiated_shutdown_behavior = lookup(template.value, "instance_initiated_shutdown_behavior", null)
- kernel_id = lookup(template.value, "kernel_id", null)
- ram_disk_id = lookup(template.value, "ram_disk_id", null)
+ default_version = try(each.value.default_version, var.group_default_settings.default_version, null)
+ update_default_version = try(each.value.update_default_version, var.group_default_settings.update_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, var.group_default_settings.disable_api_termination, null)
+ instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.group_default_settings.instance_initiated_shutdown_behavior, null)
+ kernel_id = try(each.value.kernel_id, var.group_default_settings.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, var.group_default_settings.ram_disk_id, null)
dynamic "block_device_mappings" {
- for_each = lookup(each.value, "block_device_mappings", null) != null ? each.value.block_device_mappings : []
+ for_each = try(each.value.block_device_mappings, var.group_default_settings.block_device_mappings, [])
content {
device_name = block_device_mappings.value.device_name
no_device = lookup(block_device_mappings.value, "no_device", null)
@@ -267,7 +267,7 @@ resource "aws_launch_template" "this" {
}
dynamic "capacity_reservation_specification" {
- for_each = lookup(each.value, "capacity_reservation_specification", null) != null ? [each.value.capacity_reservation_specification] : []
+    for_each = try([each.value.capacity_reservation_specification], [var.group_default_settings.capacity_reservation_specification], [])
content {
capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)
@@ -281,7 +281,7 @@ resource "aws_launch_template" "this" {
}
dynamic "cpu_options" {
- for_each = lookup(each.value, "cpu_options", null) != null ? [each.value.cpu_options] : []
+    for_each = try([each.value.cpu_options], [var.group_default_settings.cpu_options], [])
content {
core_count = cpu_options.value.core_count
threads_per_core = cpu_options.value.threads_per_core
@@ -289,35 +289,35 @@ resource "aws_launch_template" "this" {
}
dynamic "credit_specification" {
- for_each = lookup(each.value, "credit_specification", null) != null ? [each.value.credit_specification] : []
+    for_each = try([each.value.credit_specification], [var.group_default_settings.credit_specification], [])
content {
cpu_credits = credit_specification.value.cpu_credits
}
}
dynamic "elastic_gpu_specifications" {
- for_each = lookup(each.value, "elastic_gpu_specifications", null) != null ? [each.value.elastic_gpu_specifications] : []
+    for_each = try([each.value.elastic_gpu_specifications], [var.group_default_settings.elastic_gpu_specifications], [])
content {
type = elastic_gpu_specifications.value.type
}
}
dynamic "elastic_inference_accelerator" {
- for_each = lookup(each.value, "elastic_inference_accelerator", null) != null ? [each.value.elastic_inference_accelerator] : []
+    for_each = try([each.value.elastic_inference_accelerator], [var.group_default_settings.elastic_inference_accelerator], [])
content {
type = elastic_inference_accelerator.value.type
}
}
dynamic "enclave_options" {
- for_each = lookup(each.value, "enclave_options", null) != null ? [each.value.enclave_options] : []
+    for_each = try([each.value.enclave_options], [var.group_default_settings.enclave_options], [])
content {
enabled = enclave_options.value.enabled
}
}
dynamic "hibernation_options" {
- for_each = lookup(each.value, "hibernation_options", null) != null ? [each.value.hibernation_options] : []
+    for_each = try([each.value.hibernation_options], [var.group_default_settings.hibernation_options], [])
content {
configured = hibernation_options.value.configured
}
@@ -329,22 +329,21 @@ resource "aws_launch_template" "this" {
# data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.name,
# )[count.index]
# }
-
+ # TODO - oy
dynamic "iam_instance_profile" {
- for_each = lookup(each.value, "iam_instance_profile", null) != null ? [1] : []
+ for_each = try([each.value.iam_instance_profile_name], [])
content {
- name = iam_instance_profile.value.name
- arn = iam_instance_profile.value.arn
+      name = iam_instance_profile.value
}
}
dynamic "instance_market_options" {
- for_each = lookup(each.value, "instance_market_options", null) != null ? [each.value.instance_market_options] : []
+    for_each = try([each.value.instance_market_options], [var.group_default_settings.instance_market_options], [])
content {
market_type = instance_market_options.value.market_type
dynamic "spot_options" {
- for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : []
+ for_each = try([instance_market_options.value.spot_options], [])
content {
block_duration_minutes = spot_options.value.block_duration_minutes
instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
@@ -357,14 +356,14 @@ resource "aws_launch_template" "this" {
}
dynamic "license_specification" {
- for_each = lookup(each.value, "license_specifications", null) != null ? [each.value.license_specifications] : []
+    for_each = try([each.value.license_specifications], [var.group_default_settings.license_specifications], [])
content {
      license_configuration_arn = license_specification.value.license_configuration_arn
}
}
dynamic "metadata_options" {
- for_each = lookup(each.value, "metadata_options", null) != null ? [each.value.metadata_options] : []
+ for_each = try([each.value.metadata_options], [var.group_default_settings.metadata_options], [])
content {
http_endpoint = lookup(metadata_options.value, "http_endpoint", null)
http_tokens = lookup(metadata_options.value, "http_tokens", null)
@@ -373,14 +372,14 @@ resource "aws_launch_template" "this" {
}
dynamic "monitoring" {
- for_each = lookup(each.value, "enable_monitoring", null) != null ? [1] : []
+    for_each = try([each.value.enable_monitoring], [var.group_default_settings.enable_monitoring], [])
content {
- enabled = each.value.enable_monitoring
+      enabled = monitoring.value
}
}
dynamic "network_interfaces" {
- for_each = lookup(each.value, "network_interfaces", null) != null ? each.value.network_interfaces : []
+ for_each = try(each.value.network_interfaces, var.group_default_settings.network_interfaces, [])
iterator = interface
content {
associate_carrier_ip_address = lookup(interface.value, "associate_carrier_ip_address", null)
@@ -400,7 +399,7 @@ resource "aws_launch_template" "this" {
}
dynamic "placement" {
- for_each = lookup(each.value, "placement", null) != null ? [each.value.placement] : []
+    for_each = try([each.value.placement], [var.group_default_settings.placement], [])
content {
affinity = lookup(placement.value, "affinity", null)
availability_zone = lookup(placement.value, "availability_zone", null)
@@ -414,9 +413,8 @@ resource "aws_launch_template" "this" {
tag_specifications {
resource_type = "volume"
-
tags = merge(
- { "Name" = "${local.cluster_name}-${lookup(each.value, "name", each.key)}-eks_asg" },
+ { "Name" = "${local.cluster_name}-${try(each.value.name, each.key)}-eks_asg" },
var.tags,
{ for tag in lookup(each.value, "tags", {}) : tag["key"] => tag["value"] if tag["key"] != "Name" && tag["propagate_at_launch"] }
)
@@ -424,9 +422,8 @@ resource "aws_launch_template" "this" {
tag_specifications {
resource_type = "instance"
-
tags = merge(
- { "Name" = "${local.cluster_name}-${lookup(each.value, "name", each.key)}-eks_asg" },
+ { "Name" = "${local.cluster_name}-${try(each.value.name, each.key)}-eks_asg" },
{ for tag_key, tag_value in var.tags :
tag_key => tag_value
if tag_key != "Name" && !contains([for tag in lookup(each.value, "tags", {}) : tag["key"]], tag_key)
@@ -436,9 +433,8 @@ resource "aws_launch_template" "this" {
tag_specifications {
resource_type = "network-interface"
-
tags = merge(
- { "Name" = "${local.cluster_name}-${lookup(each.value, "name", each.key)}-eks_asg" },
+ { "Name" = "${local.cluster_name}-${try(each.value.name, each.key)}-eks_asg" },
var.tags,
{ for tag in lookup(each.value, "tags", {}) : tag["key"] => tag["value"] if tag["key"] != "Name" && tag["propagate_at_launch"] }
)
@@ -460,26 +456,63 @@ resource "aws_launch_template" "this" {
aws_iam_role_policy_attachment.workers_additional_policies
]
- dynamic "tag_specifications" {
- for_each = lookup(each.value, "tag_specifications", null) != null ? each.value.tag_specifications : []
- content {
- resource_type = tag_specifications.value.resource_type
- tags = tag_specifications.value.tags
- }
- }
-
lifecycle {
create_before_destroy = true
}
+ tags = merge(var.tags, lookup(each.value, "tags", {}))
+}
+
+################################################################################
+# IAM Role & Instance Profile
+################################################################################
+
+resource "aws_iam_role" "workers" {
+ count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+
+ name_prefix = var.workers_role_name != "" ? null : local.cluster_name
+ name = var.workers_role_name != "" ? var.workers_role_name : null
+ assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
+ permissions_boundary = var.permissions_boundary
+ path = var.iam_path
+ force_detach_policies = true
+
tags = var.tags
}
+resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
+ count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+
+ policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
+ role = aws_iam_role.workers[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
+ count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0
+
+ policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
+ role = aws_iam_role.workers[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
+ count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+
+ policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
+ role = aws_iam_role.workers[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
+  for_each = var.manage_worker_iam_resources && var.create_eks ? toset(var.workers_additional_policies) : toset([])
+
+ role = aws_iam_role.workers[0].name
+ policy_arn = each.value
+}
+
resource "aws_iam_instance_profile" "workers" {
- count = var.manage_worker_iam_resources && var.create_eks ? var.iam_instance_profiles : {}
+ for_each = var.manage_worker_iam_resources && var.create_eks ? var.iam_instance_profiles : {}
name_prefix = local.cluster_name
- role = lookup(var.worker_groups[count.index], "iam_role_id", local.default_iam_role_id)
+ role = aws_iam_role.workers[0].id
path = var.iam_path
lifecycle {
@@ -489,6 +522,10 @@ resource "aws_iam_instance_profile" "workers" {
tags = var.tags
}
+################################################################################
+# Security Group
+################################################################################
+
resource "aws_security_group" "workers" {
count = var.worker_create_security_group && var.create_eks ? 1 : 0
@@ -587,44 +624,3 @@ resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
to_port = 65535
type = "ingress"
}
-
-resource "aws_iam_role" "workers" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
-
- name_prefix = var.workers_role_name != "" ? null : local.cluster_name
- name = var.workers_role_name != "" ? var.workers_role_name : null
- assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
- permissions_boundary = var.permissions_boundary
- path = var.iam_path
- force_detach_policies = true
-
- tags = var.tags
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
-
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
- count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0
-
- policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
-
- policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
- count = var.manage_worker_iam_resources && var.create_eks ? length(var.workers_additional_policies) : 0
-
- role = aws_iam_role.workers[0].name
- policy_arn = var.workers_additional_policies[count.index]
-}
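
The refactor above replaces per-group inline launch configuration with standalone maps: `launch_templates` and `iam_instance_profiles` each create their own resources, and a worker group points at a template through `launch_template_key`. A hypothetical wiring of the three inputs (all values placeholders):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # placeholder source

  cluster_name = "example"

  launch_templates = {
    standard = {
      instance_type = "m6i.large"
      key_name      = "my-key-pair" # placeholder key pair
    }
  }

  iam_instance_profiles = {
    default = {} # profile is built from the module-managed worker role
  }

  worker_groups = {
    default = {
      launch_template_key = "standard" # must reference a key in launch_templates
      min_size            = 1
      max_size            = 3
    }
  }
}
```
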
From cc9d3be0fcfd1e698dd3417c87522ff70d050576 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 8 Nov 2021 10:28:09 -0500
Subject: [PATCH 06/83] chore: clean up cluster IAM resources
---
README.md | 24 +++----
aws_auth.tf | 6 +-
data.tf | 20 ++----
examples/complete/main.tf | 4 +-
locals.tf | 6 +-
main.tf | 112 ++++++++++++-------------------
modules/fargate/README.md | 2 +-
modules/fargate/main.tf | 12 ++--
modules/fargate/outputs.tf | 2 +-
modules/fargate/variables.tf | 2 +-
modules/node_groups/README.md | 2 +-
modules/node_groups/locals.tf | 2 +-
modules/node_groups/variables.tf | 2 +-
variables.tf | 68 +++++++++++++------
workers.tf | 62 ++++++++---------
15 files changed, 158 insertions(+), 168 deletions(-)
diff --git a/README.md b/README.md
index ac779eb7db..b3284c7c35 100644
--- a/README.md
+++ b/README.md
@@ -147,15 +147,13 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
| [aws_iam_instance_profile.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cluster_deny_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_deny_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
@@ -179,12 +177,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_worker_windows](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_iam_instance_profile.custom_worker_group_iam_instance_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_instance_profile) | data source |
+| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_deny_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.workers_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_role.custom_cluster_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [http_http.wait_for_cluster](https://registry.terraform.io/providers/terraform-aws-modules/http/latest/docs/data-sources/http) | data source |
@@ -206,16 +201,18 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`. | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | IAM role name for the cluster. If manage\_cluster\_iam\_resources is set to false, set this to reuse an existing IAM role. If manage\_cluster\_iam\_resources is set to true, set this to force the created role name. | `string` | `""` | no |
+| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
+| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
+| [cluster\_iam\_role\_use\_name\_prefix](#input\_cluster\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `bool` | `true` | no |
| [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. Also used as a prefix in names of related resources. | `string` | `""` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
| [cluster\_tags](#input\_cluster\_tags) | A map of tags to add to just the eks resource. | `map(string)` | `{}` | no |
| [cluster\_update\_timeout](#input\_cluster\_update\_timeout) | Timeout value when updating the EKS cluster. | `string` | `"60m"` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21). | `string` | `null` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created. | `bool` | `true` | no |
| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows`. | `string` | `"linux"` | no |
| [eks\_oidc\_root\_ca\_thumbprint](#input\_eks\_oidc\_root\_ca\_thumbprint) | Thumbprint of Root CA for EKS OIDC, Valid until 2037 | `string` | `"9e99a48a9960b14926bb7f3b02e22da2b0ab7280"` | no |
@@ -236,17 +233,18 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`. | `string` | `"./"` | no |
| [launch\_templates](#input\_launch\_templates) | Map of launch template definitions to create | `map(any)` | `{}` | no |
| [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file. | `bool` | `true` | no |
-| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, cluster\_iam\_role\_name must be specified. | `bool` | `true` | no |
+| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified. | `bool` | `true` | no |
| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no |
| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no |
| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |  | `[]` | no |
+| [name](#input\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [node\_groups](#input\_node\_groups) | Map of maps of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider. | `list(string)` | `[]` | no |
| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
| [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only. | `map(string)` | `{}` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | `null` | no |
| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available. | `number` | `300` | no |
| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
diff --git a/aws_auth.tf b/aws_auth.tf
index 2eeed22cde..1510f4e218 100644
--- a/aws_auth.tf
+++ b/aws_auth.tf
@@ -1,6 +1,6 @@
locals {
auth_worker_roles = [
- for index in range(0, var.create_eks ? local.worker_group_count : 0) : {
+ for index in range(0, var.create ? local.worker_group_count : 0) : {
worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
coalescelist(
aws_iam_instance_profile.workers.*.role,
@@ -16,7 +16,7 @@ locals {
# Convert to format needed by aws-auth ConfigMap
configmap_roles = [
for role in concat(
- local.auth_worker_roles,
+ aws_iam_instance_profile.workers.*.role,
module.node_groups.aws_auth_roles,
module.fargate.aws_auth_roles,
) :
@@ -38,7 +38,7 @@ locals {
}
resource "kubernetes_config_map" "aws_auth" {
- count = var.create_eks && var.manage_aws_auth ? 1 : 0
+ count = var.create && var.manage_aws_auth ? 1 : 0
metadata {
name = "aws-auth"
diff --git a/data.tf b/data.tf
index 07c0b92f6f..d423db215d 100644
--- a/data.tf
+++ b/data.tf
@@ -63,24 +63,14 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
}
}
-data "aws_iam_role" "custom_cluster_iam_role" {
- count = var.manage_cluster_iam_resources ? 0 : 1
+# data "aws_iam_role" "custom_cluster_iam_role" {
+# count = var.manage_cluster_iam_resources ? 0 : 1
- name = var.cluster_iam_role_name
-}
-
-data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
- count = var.manage_worker_iam_resources ? 0 : var.worker_groups
-
- name = lookup(
- var.worker_groups[count.index],
- "iam_instance_profile_name",
- null # TODO need default
- )
-}
+# name = var.cluster_iam_role_name
+# }
data "http" "wait_for_cluster" {
- count = var.create_eks && var.manage_aws_auth ? 1 : 0
+ count = var.create && var.manage_aws_auth ? 1 : 0
url = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
ca_certificate = base64decode(local.cluster_auth_base64)
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 052a20c843..79b02e3bce 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -145,7 +145,7 @@ module "eks" {
module "disabled_eks" {
source = "../.."
- create_eks = false
+ create = false
}
module "disabled_fargate" {
@@ -157,7 +157,7 @@ module "disabled_fargate" {
module "disabled_node_groups" {
source = "../../modules/node_groups"
- create_eks = false
+ create = false
}
################################################################################
diff --git a/locals.tf b/locals.tf
index 00aef514ba..6362f7e360 100644
--- a/locals.tf
+++ b/locals.tf
@@ -10,8 +10,6 @@ locals {
cluster_primary_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
- cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
- cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
# Worker groups
worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
@@ -25,7 +23,7 @@ locals {
client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
- kubeconfig = var.create_eks ? templatefile("${path.module}/templates/kubeconfig.tpl", {
+ kubeconfig = var.create ? templatefile("${path.module}/templates/kubeconfig.tpl", {
kubeconfig_name = coalesce(var.kubeconfig_name, "eks_${var.cluster_name}")
endpoint = local.cluster_endpoint
cluster_auth_base64 = local.cluster_auth_base64
@@ -37,7 +35,7 @@ locals {
}) : ""
launch_template_userdata_rendered = [
- for key, group in(var.create_eks ? var.worker_groups : {}) : templatefile(
+ for key, group in(var.create ? var.worker_groups : {}) : templatefile(
try(
group.userdata_template_file,
lookup(group, "platform", var.default_platform) == "windows"
diff --git a/main.tf b/main.tf
index 9500141bc8..85e005ded2 100644
--- a/main.tf
+++ b/main.tf
@@ -2,11 +2,11 @@
# Cluster
################################################################################
resource "aws_eks_cluster" "this" {
- count = var.create_eks ? 1 : 0
+ count = var.create ? 1 : 0
name = var.cluster_name
enabled_cluster_log_types = var.cluster_enabled_log_types
- role_arn = local.cluster_iam_role_arn
+ role_arn = try(aws_iam_role.cluster[0].arn, var.cluster_iam_role_arn)
version = var.cluster_version
vpc_config {
@@ -54,7 +54,7 @@ resource "aws_eks_cluster" "this" {
}
resource "aws_cloudwatch_log_group" "this" {
- count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
+ count = length(var.cluster_enabled_log_types) > 0 && var.create ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.cluster_log_retention_in_days
@@ -68,7 +68,7 @@ resource "aws_cloudwatch_log_group" "this" {
################################################################################
resource "aws_security_group" "cluster" {
- count = var.cluster_create_security_group && var.create_eks ? 1 : 0
+ count = var.cluster_create_security_group && var.create ? 1 : 0
name_prefix = var.cluster_name
description = "EKS cluster security group."
@@ -83,7 +83,7 @@ resource "aws_security_group" "cluster" {
}
resource "aws_security_group_rule" "cluster_egress_internet" {
- count = var.cluster_create_security_group && var.create_eks ? 1 : 0
+ count = var.cluster_create_security_group && var.create ? 1 : 0
description = "Allow cluster egress access to the Internet."
protocol = "-1"
@@ -95,7 +95,7 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
- count = var.cluster_create_security_group && var.create_eks && var.worker_create_security_group ? 1 : 0
+ count = var.cluster_create_security_group && var.create && var.worker_create_security_group ? 1 : 0
description = "Allow pods to communicate with the EKS cluster API."
protocol = "tcp"
@@ -107,7 +107,7 @@ resource "aws_security_group_rule" "cluster_https_worker_ingress" {
}
resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
- for_each = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
+ for_each = var.create && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
description = "Allow private K8S API ingress from custom CIDR source."
type = "ingress"
@@ -120,7 +120,7 @@ resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
}
resource "aws_security_group_rule" "cluster_private_access_sg_source" {
- count = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
+ count = var.create && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
description = "Allow private K8S API ingress from custom Security Groups source."
type = "ingress"
@@ -137,7 +137,7 @@ resource "aws_security_group_rule" "cluster_private_access_sg_source" {
################################################################################
resource "local_file" "kubeconfig" {
- count = var.write_kubeconfig && var.create_eks ? 1 : 0
+ count = var.write_kubeconfig && var.create ? 1 : 0
content = local.kubeconfig
filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
@@ -159,7 +159,7 @@ resource "local_file" "kubeconfig" {
# TODO - update to use TLS data source and drop hacks
resource "aws_iam_openid_connect_provider" "oidc_provider" {
- count = var.enable_irsa && var.create_eks ? 1 : 0
+ count = var.enable_irsa && var.create ? 1 : 0
client_id_list = local.client_id_list
thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
@@ -174,52 +174,53 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
}
################################################################################
-# IAM Roles & Policies
+# Cluster IAM Role, Permissions, & Policies
################################################################################
+locals {
+  cluster_iam_role_name = coalesce(var.cluster_iam_role_name, var.cluster_name)
+}
+
resource "aws_iam_role" "cluster" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
- name_prefix = var.cluster_iam_role_name != "" ? null : var.cluster_name
- name = var.cluster_iam_role_name != "" ? var.cluster_iam_role_name : null
+ name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
+ name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
- permissions_boundary = var.permissions_boundary
- path = var.iam_path
+ permissions_boundary = var.cluster_iam_role_permissions_boundary
+ path = var.cluster_iam_role_path
force_detach_policies = true
- tags = var.tags
+ tags = merge(var.tags, var.cluster_role_iam_tags)
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
+  role       = aws_iam_role.cluster[0].name
policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy"
- role = local.cluster_iam_role_name
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
+  role       = aws_iam_role.cluster[0].name
policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy"
- role = local.cluster_iam_role_name
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceControllerPolicy" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
+  role       = aws_iam_role.cluster[0].name
policy_arn = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController"
- role = local.cluster_iam_role_name
}
-/*
- Adding a policy to cluster IAM role that allow permissions
- required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
-*/
-
-data "aws_iam_policy_document" "cluster_elb_sl_role_creation" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+data "aws_iam_policy_document" "cluster_additional" {
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
+ # Permissions required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
statement {
+ sid = "ELBServiceLinkedRole"
effect = "Allow"
actions = [
"ec2:DescribeAccountAttributes",
@@ -228,34 +229,9 @@ data "aws_iam_policy_document" "cluster_elb_sl_role_creation" {
]
resources = ["*"]
}
-}
-
-resource "aws_iam_policy" "cluster_elb_sl_role_creation" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-
- name_prefix = "${var.cluster_name}-elb-sl-role-creation"
- description = "Permissions for EKS to create AWSServiceRoleForElasticLoadBalancing service-linked role"
- policy = data.aws_iam_policy_document.cluster_elb_sl_role_creation[0].json
- path = var.iam_path
-
- tags = var.tags
-}
-
-resource "aws_iam_role_policy_attachment" "cluster_elb_sl_role_creation" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-
- policy_arn = aws_iam_policy.cluster_elb_sl_role_creation[0].arn
- role = local.cluster_iam_role_name
-}
-
-/*
- Adding a policy to cluster IAM role that deny permissions to logs:CreateLogGroup
- it is not needed since we create the log group ourselve in this module, and it is causing trouble during cleanup/deletion
-*/
-
-data "aws_iam_policy_document" "cluster_deny_log_group" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+  # Deny permission to logs:CreateLogGroup; it is not needed since we create the log group ourselves in this module,
+  # and it causes trouble during cleanup/deletion
statement {
effect = "Deny"
actions = [
@@ -265,20 +241,20 @@ data "aws_iam_policy_document" "cluster_deny_log_group" {
}
}
-resource "aws_iam_policy" "cluster_deny_log_group" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+resource "aws_iam_policy" "cluster_additional" {
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
- name_prefix = "${var.cluster_name}-deny-log-group"
- description = "Deny CreateLogGroup"
- policy = data.aws_iam_policy_document.cluster_deny_log_group[0].json
- path = var.iam_path
+ name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
+ name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
+ description = "Additional permissions for EKS cluster"
+ policy = data.aws_iam_policy_document.cluster_additional[0].json
- tags = var.tags
+ tags = merge(var.tags, var.cluster_role_iam_tags)
}
-resource "aws_iam_role_policy_attachment" "cluster_deny_log_group" {
- count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
+resource "aws_iam_role_policy_attachment" "cluster_additional" {
+ count = var.create_cluster_iam_role && var.create ? 1 : 0
- policy_arn = aws_iam_policy.cluster_deny_log_group[0].arn
- role = local.cluster_iam_role_name
+  role       = aws_iam_role.cluster[0].name
+ policy_arn = aws_iam_policy.cluster_additional[0].arn
}
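
Under the new flag pair, consumers that bring their own cluster role disable creation and pass the ARN directly. A minimal sketch (module source is a placeholder, and `aws_iam_role.existing` is assumed to be defined elsewhere in the caller's configuration):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # placeholder source

  cluster_name = "example"

  create_cluster_iam_role = false
  cluster_iam_role_arn    = aws_iam_role.existing.arn # pre-created role (hypothetical)
}
```
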
diff --git a/modules/fargate/README.md b/modules/fargate/README.md
index 466b2a051a..629e3c1fee 100644
--- a/modules/fargate/README.md
+++ b/modules/fargate/README.md
@@ -52,7 +52,7 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. | `string` | `""` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the IAM Role that provides permissions for the EKS Fargate Profile should be created. | `bool` | `true` | no |
| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no |
diff --git a/modules/fargate/main.tf b/modules/fargate/main.tf
index a4e4b0d80f..33caaaa060 100644
--- a/modules/fargate/main.tf
+++ b/modules/fargate/main.tf
@@ -1,16 +1,16 @@
locals {
- create_eks = var.create_eks && length(var.fargate_profiles) > 0
+ create = var.create && length(var.fargate_profiles) > 0
pod_execution_role_arn = coalescelist(aws_iam_role.eks_fargate_pod.*.arn, data.aws_iam_role.custom_fargate_iam_role.*.arn, [""])[0]
pod_execution_role_name = coalescelist(aws_iam_role.eks_fargate_pod.*.name, data.aws_iam_role.custom_fargate_iam_role.*.name, [""])[0]
- fargate_profiles = { for k, v in var.fargate_profiles : k => v if var.create_eks }
+ fargate_profiles = { for k, v in var.fargate_profiles : k => v if var.create }
}
data "aws_partition" "current" {}
data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
- count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
+ count = local.create && var.create_fargate_pod_execution_role ? 1 : 0
statement {
effect = "Allow"
@@ -24,13 +24,13 @@ data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
}
data "aws_iam_role" "custom_fargate_iam_role" {
- count = local.create_eks && !var.create_fargate_pod_execution_role ? 1 : 0
+ count = local.create && !var.create_fargate_pod_execution_role ? 1 : 0
name = var.fargate_pod_execution_role_name
}
resource "aws_iam_role" "eks_fargate_pod" {
- count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
+ count = local.create && var.create_fargate_pod_execution_role ? 1 : 0
name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24))
assume_role_policy = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json
@@ -40,7 +40,7 @@ resource "aws_iam_role" "eks_fargate_pod" {
}
resource "aws_iam_role_policy_attachment" "eks_fargate_pod" {
- count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
+ count = local.create && var.create_fargate_pod_execution_role ? 1 : 0
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
role = aws_iam_role.eks_fargate_pod[0].name
diff --git a/modules/fargate/outputs.tf b/modules/fargate/outputs.tf
index 126ba6e385..0535d71de3 100644
--- a/modules/fargate/outputs.tf
+++ b/modules/fargate/outputs.tf
@@ -24,6 +24,6 @@ output "aws_auth_roles" {
for i in range(1) : {
worker_role_arn = local.pod_execution_role_arn
platform = "fargate"
- } if local.create_eks
+ } if local.create
]
}
diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf
index 39e2cc68b3..fe9cf83303 100644
--- a/modules/fargate/variables.tf
+++ b/modules/fargate/variables.tf
@@ -1,4 +1,4 @@
-variable "create_eks" {
+variable "create" {
description = "Controls if EKS resources should be created (it affects almost all resources)"
type = bool
default = true
diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md
index f580287682..3abbbddd57 100644
--- a/modules/node_groups/README.md
+++ b/modules/node_groups/README.md
@@ -94,7 +94,7 @@ No modules.
| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of parent cluster | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of parent cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
| [node\_default\_settings](#input\_node\_default\_settings) | Node group defaults from parent | `any` | `{}` | no |
| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
index 178eb9b1c7..a9b376d796 100644
--- a/modules/node_groups/locals.tf
+++ b/modules/node_groups/locals.tf
@@ -37,7 +37,7 @@ locals {
},
var.node_groups_defaults,
v,
- ) if var.create_eks }
+ ) if var.create }
node_groups_names = { for k, v in local.node_groups_expanded : k => lookup(
v,
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
index 7c24706420..93412ef6f0 100644
--- a/modules/node_groups/variables.tf
+++ b/modules/node_groups/variables.tf
@@ -1,4 +1,4 @@
-variable "create_eks" {
+variable "create" {
description = "Controls if EKS resources should be created (it affects almost all resources)"
type = bool
default = true
diff --git a/variables.tf b/variables.tf
index 14b355479e..3b7bc9c364 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,3 +1,27 @@
+variable "tags" {
+ description = "A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only"
+ type = map(string)
+ default = {}
+}
+
+variable "create" {
+ description = "Controls if EKS resources should be created (it affects almost all resources)"
+ type = bool
+ default = true
+}
+
+################################################################################
+# Cluster
+################################################################################
+
+variable "cluster_name" {
+ description = "Name of the EKS cluster and default name (prefix) used throughout the resources created"
+ type = string
+ default = ""
+}
+
+### ^ADDED^
+
variable "cluster_enabled_log_types" {
description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
type = list(string)
@@ -16,11 +40,7 @@ variable "cluster_log_retention_in_days" {
default = 90
}
-variable "cluster_name" {
- description = "Name of the EKS cluster. Also used as a prefix in names of related resources."
- type = string
- default = ""
-}
+
variable "cluster_security_group_id" {
description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers"
@@ -120,11 +140,7 @@ variable "subnets" {
default = []
}
-variable "tags" {
- description = "A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only."
- type = map(string)
- default = {}
-}
+
variable "cluster_tags" {
description = "A map of tags to add to just the eks resource."
@@ -210,7 +226,7 @@ variable "kubeconfig_aws_authenticator_command" {
}
variable "kubeconfig_aws_authenticator_command_args" {
- description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]."
+ description = "Default arguments passed to the authenticator command. Defaults to [token -i ${cluster_name}]."
type = list(string)
default = []
}
@@ -324,15 +340,33 @@ variable "cluster_endpoint_public_access_cidrs" {
}
variable "manage_cluster_iam_resources" {
- description = "Whether to let the module manage cluster IAM resources. If set to false, cluster_iam_role_name must be specified."
+ description = "Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified."
+ type = bool
+ default = true
+}
+
+variable "create_cluster_iam_role" {
+ description = "Determines whether a cluster IAM role is created or to use an existing IAM role"
type = bool
default = true
}
+variable "cluster_iam_role_arn" {
+ description = "Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false`"
+ type = string
+ default = null
+}
+
variable "cluster_iam_role_name" {
- description = "IAM role name for the cluster. If manage_cluster_iam_resources is set to false, set this to reuse an existing IAM role. If manage_cluster_iam_resources is set to true, set this to force the created role name."
+ description = "Name to use on cluster role created"
type = string
- default = ""
+ default = null
+}
+
+variable "cluster_iam_role_use_name_prefix" {
+ description = "Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix"
+  type        = bool
+ default = true
}
variable "manage_worker_iam_resources" {
@@ -353,12 +387,6 @@ variable "attach_worker_cni_policy" {
default = true
}
-variable "create_eks" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
- type = bool
- default = true
-}
-
variable "node_groups_defaults" {
description = "Map of values to be applied to all node groups. See `node_groups` module's documentation for more details"
type = any
diff --git a/workers.tf b/workers.tf
index aaa8e7da7d..aeffc9e82a 100644
--- a/workers.tf
+++ b/workers.tf
@@ -15,7 +15,7 @@ locals {
module "fargate" {
source = "./modules/fargate"
- create_eks = var.create_eks
+ create = var.create
create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
cluster_name = local.cluster_name
@@ -36,7 +36,7 @@ module "fargate" {
module "node_groups" {
source = "./modules/node_groups"
- create_eks = var.create_eks
+ create = var.create
cluster_name = local.cluster_name
cluster_endpoint = local.cluster_endpoint
@@ -64,7 +64,7 @@ module "node_groups" {
################################################################################
resource "aws_autoscaling_group" "this" {
- for_each = var.create_eks ? var.worker_groups : {}
+ for_each = var.create ? var.worker_groups : {}
name_prefix = "${join("-", [local.cluster_name, try(each.value.name, each.key)])}-"
@@ -223,7 +223,7 @@ resource "aws_autoscaling_group" "this" {
}
resource "aws_launch_template" "this" {
- for_each = var.create_eks ? var.launch_templates : {}
+ for_each = var.create ? var.launch_templates : {}
name_prefix = "${local.cluster_name}-${try(each.value.name, each.key)}"
description = try(each.value.description, var.group_default_settings.description, null)
@@ -468,7 +468,7 @@ resource "aws_launch_template" "this" {
################################################################################
resource "aws_iam_role" "workers" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+ count = var.manage_worker_iam_resources && var.create ? 1 : 0
name_prefix = var.workers_role_name != "" ? null : local.cluster_name
name = var.workers_role_name != "" ? var.workers_role_name : null
@@ -480,54 +480,54 @@ resource "aws_iam_role" "workers" {
tags = var.tags
}
+resource "aws_iam_instance_profile" "workers" {
+ count = var.create && var.manage_worker_iam_resources ? 1 : 0
+
+ name_prefix = local.cluster_name
+ role = aws_iam_role.workers[0].id
+ path = var.iam_path
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ tags = var.tags
+}
+
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+ count = var.create && var.manage_worker_iam_resources ? 1 : 0
policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.workers[0].name
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
- count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0
+ count = var.create && var.manage_worker_iam_resources && var.attach_worker_cni_policy ? 1 : 0
policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
role = aws_iam_role.workers[0].name
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
- count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
+ count = var.create && var.manage_worker_iam_resources ? 1 : 0
policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.workers[0].name
}
resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
- for_each = var.manage_worker_iam_resources && var.create_eks ? toset(var.workers_additional_policies) : []
+ for_each = var.create && var.manage_worker_iam_resources ? toset(var.workers_additional_policies) : []
role = aws_iam_role.workers[0].name
policy_arn = each.value
}
-resource "aws_iam_instance_profile" "workers" {
- for_each = var.manage_worker_iam_resources && var.create_eks ? var.iam_instance_profiles : {}
-
- name_prefix = local.cluster_name
- role = aws_iam_role.workers[0].id
- path = var.iam_path
-
- lifecycle {
- create_before_destroy = true
- }
-
- tags = var.tags
-}
-
################################################################################
# Security Group
################################################################################
resource "aws_security_group" "workers" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.create ? 1 : 0
name_prefix = var.cluster_name
description = "Security group for all nodes in the cluster."
@@ -542,7 +542,7 @@ resource "aws_security_group" "workers" {
}
resource "aws_security_group_rule" "workers_egress_internet" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.create ? 1 : 0
description = "Allow nodes all egress to the Internet."
protocol = "-1"
@@ -554,7 +554,7 @@ resource "aws_security_group_rule" "workers_egress_internet" {
}
resource "aws_security_group_rule" "workers_ingress_self" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.create ? 1 : 0
description = "Allow node to communicate with each other."
protocol = "-1"
@@ -566,7 +566,7 @@ resource "aws_security_group_rule" "workers_ingress_self" {
}
resource "aws_security_group_rule" "workers_ingress_cluster" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.create ? 1 : 0
description = "Allow workers pods to receive communication from the cluster control plane."
protocol = "tcp"
@@ -578,7 +578,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
}
resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
- count = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
+ count = var.worker_create_security_group && var.create ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
description = "Allow workers Kubelets to receive communication from the cluster control plane."
protocol = "tcp"
@@ -590,7 +590,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
}
resource "aws_security_group_rule" "workers_ingress_cluster_https" {
- count = var.worker_create_security_group && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.create ? 1 : 0
description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
protocol = "tcp"
@@ -602,7 +602,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" {
}
resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
- count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create ? 1 : 0
description = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
protocol = "all"
@@ -614,7 +614,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
}
resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
- count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create_eks ? 1 : 0
+ count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create ? 1 : 0
description = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
protocol = "all"
From f2d29789ed0d2dd253eda7a3c697a4883df210b1 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 8 Nov 2021 11:58:02 -0500
Subject: [PATCH 07/83] chore: working through cleaning up variables -
`main.tf` done so far
---
README.md | 39 +--
UPGRADE-18.0.md | 6 +-
data.tf | 21 --
docs/faq.md | 6 +-
examples/bottlerocket/main.tf | 6 +-
examples/complete/main.tf | 6 +-
examples/fargate/main.tf | 12 +-
examples/instance_refresh/main.tf | 4 +-
examples/irsa/main.tf | 4 +-
examples/launch_templates/main.tf | 2 +-
.../main.tf | 4 +-
examples/managed_node_groups/main.tf | 4 +-
examples/secrets_encryption/main.tf | 4 +-
locals.tf | 4 +-
main.tf | 120 +++----
modules/fargate/README.md | 2 +-
modules/fargate/main.tf | 2 +-
modules/fargate/variables.tf | 4 +-
modules/node_groups/locals.tf | 2 +-
modules/node_groups/main.tf | 2 +-
variables.tf | 302 +++++++++++-------
versions.tf | 24 +-
workers.tf | 2 +-
23 files changed, 319 insertions(+), 263 deletions(-)
diff --git a/README.md b/README.md
index b3284c7c35..aa75f64d5b 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ module "eks" {
cluster_version = "1.21"
cluster_name = "my-cluster"
vpc_id = "vpc-1234556abcdef"
- subnets = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
worker_groups = {
one = {
@@ -117,10 +117,11 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
| [http](#requirement\_http) | >= 2.4.1 |
| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
+| [local](#requirement\_local) | >= 1.4.0 |
+| [tls](#requirement\_tls) | >= 2.2.0 |
## Providers
@@ -129,7 +130,8 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws](#provider\_aws) | >= 3.56.0 |
| [http](#provider\_http) | >= 2.4.1 |
| [kubernetes](#provider\_kubernetes) | >= 1.11.1 |
-| [local](#provider\_local) | >= 1.4 |
+| [local](#provider\_local) | >= 1.4.0 |
+| [tls](#provider\_tls) | >= 2.2.0 |
## Modules
@@ -150,9 +152,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
@@ -182,6 +181,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_iam_policy_document.workers_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [http_http.wait_for_cluster](https://registry.terraform.io/providers/terraform-aws-modules/http/latest/docs/data-sources/http) | data source |
+| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
## Inputs
@@ -190,9 +190,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster. | `bool` | `true` | no |
| [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no |
| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for access to the Amazon EKS private API server endpoint. When `true`, `cluster_endpoint_private_access_cidrs` must be set. | `bool` | `false` | no |
-| [cluster\_create\_security\_group](#input\_cluster\_create\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`. | `bool` | `true` | no |
-| [cluster\_create\_timeout](#input\_cluster\_create\_timeout) | Timeout value when creating the EKS cluster. | `string` | `"30m"` | no |
-| [cluster\_delete\_timeout](#input\_cluster\_delete\_timeout) | Timeout value when deleting the EKS cluster. | `string` | `"15m"` | no |
| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic. | `list(string)` | `["0.0.0.0/0"]` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | `list(object({ provider_key_arn = string, resources = list(string) }))` | `[]` | no |
@@ -203,23 +200,30 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint. | `list(string)` | `["0.0.0.0/0"]` | no |
| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
+| [cluster\_iam\_role\_path](#input\_cluster\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
+| [cluster\_iam\_role\_permissions\_boundary](#input\_cluster\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
+| [cluster\_iam\_role\_tags](#input\_cluster\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
| [cluster\_iam\_role\_use\_name\_prefix](#input\_cluster\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `bool` | `true` | no |
| [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
-| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no |
+| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
+| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
+| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
+| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `bool` | `true` | no |
| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
-| [cluster\_tags](#input\_cluster\_tags) | A map of tags to add to just the eks resource. | `map(string)` | `{}` | no |
-| [cluster\_update\_timeout](#input\_cluster\_update\_timeout) | Timeout value when updating the EKS cluster. | `string` | `"60m"` | no |
+| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
+| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21). | `string` | `null` | no |
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`. | `bool` | `true` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created. | `bool` | `true` | no |
| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows`. | `string` | `"linux"` | no |
-| [eks\_oidc\_root\_ca\_thumbprint](#input\_eks\_oidc\_root\_ca\_thumbprint) | Thumbprint of Root CA for EKS OIDC, Valid until 2037 | `string` | `"9e99a48a9960b14926bb7f3b02e22da2b0ab7280"` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details | `any` | `{}` | no |
-| [fargate\_subnets](#input\_fargate\_subnets) | A list of subnets to place fargate workers within (if different from subnets). | `list(string)` | `[]` | no |
+| [fargate\_subnet\_ids](#input\_fargate\_subnet\_ids) | A list of subnet IDs to place fargate workers within (if different from `subnet_ids`) | `list(string)` | `[]` | no |
| [group\_default\_settings](#input\_group\_default\_settings) | Override default values for autoscaling group, node group settings | `any` | `{}` | no |
| [iam\_instance\_profiles](#input\_iam\_instance\_profiles) | Map of instance profile definitions to create | `map(any)` | `{}` | no |
| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path. | `string` | `"/"` | no |
@@ -238,14 +242,13 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no |
| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[]` | no |
-| [name](#input\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [node\_groups](#input\_node\_groups) | Map of maps of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider. | `list(string)` | `[]` | no |
| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | `[]` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
-| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | `null` | no |
+| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available. | `number` | `300` | no |
| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
diff --git a/UPGRADE-18.0.md b/UPGRADE-18.0.md
index 445959bcab..96a0bc5907 100644
--- a/UPGRADE-18.0.md
+++ b/UPGRADE-18.0.md
@@ -20,11 +20,13 @@ If you find a bug, please open an issue with supporting configuration to reprodu
1. Removed variables:
- - TODO
+ - `var.cluster_create_timeout`, `var.cluster_update_timeout`, and `var.cluster_delete_timeout` have been replaced with `var.cluster_timeouts`
2. Renamed variables:
- - TODO
+ - `create_eks` -> `create`
+ - `subnets` -> `subnet_ids`
+ - `cluster_create_security_group` -> `create_cluster_security_group`
3. Added variables:
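A minimal before/after sketch of the removed and renamed variables above (hypothetical module call; values illustrative):

```hcl
# Before (v17.x)
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 17.0"

  create_eks                    = true
  subnets                       = ["subnet-abcde012", "subnet-bcde012a"]
  cluster_create_security_group = true

  cluster_create_timeout = "30m"
  cluster_delete_timeout = "15m"
}

# After (v18.x)
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.0"

  create                        = true
  subnet_ids                    = ["subnet-abcde012", "subnet-bcde012a"]
  create_cluster_security_group = true

  # Replaces the individual cluster_*_timeout variables
  cluster_timeouts = {
    create = "30m"
    delete = "15m"
  }
}
```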
diff --git a/data.tf b/data.tf
index d423db215d..a1e87557c4 100644
--- a/data.tf
+++ b/data.tf
@@ -48,27 +48,6 @@ data "aws_ami" "eks_worker_windows" {
owners = [var.worker_ami_owner_id_windows]
}
-data "aws_iam_policy_document" "cluster_assume_role_policy" {
- statement {
- sid = "EKSClusterAssumeRole"
-
- actions = [
- "sts:AssumeRole",
- ]
-
- principals {
- type = "Service"
- identifiers = ["eks.amazonaws.com"]
- }
- }
-}
-
-# data "aws_iam_role" "custom_cluster_iam_role" {
-# count = var.manage_cluster_iam_resources ? 0 : 1
-
-# name = var.cluster_iam_role_name
-# }
-
data "http" "wait_for_cluster" {
count = var.create && var.manage_aws_auth ? 1 : 0
diff --git a/docs/faq.md b/docs/faq.md
index 1550c282ee..6fef297060 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -211,10 +211,10 @@ module "eks" {
cluster_version = "1.21"
cluster_name = "my-cluster"
vpc_id = "vpc-1234556abcdef"
- subnets = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
+ subnet_ids = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
workers_group_defaults = {
- subnets = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
+ subnet_ids = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
}
worker_groups = {
@@ -224,7 +224,7 @@ module "eks" {
},
two = {
name = "worker-group-2"
- subnets = ["subnet-qwer123"]
+ subnet_ids = ["subnet-qwer123"]
instance_type = "t3.medium"
asg_desired_capacity = 1
public_ip = true
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index 4edc3268ac..94e21c1f2e 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -18,9 +18,9 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnets = [module.vpc.private_subnets[2]]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
+ fargate_subnet_ids = [module.vpc.private_subnets[2]]
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 79b02e3bce..bf4147158e 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -18,9 +18,9 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnets = [module.vpc.private_subnets[2]]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
+ fargate_subnet_ids = [module.vpc.private_subnets[2]]
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
index 09e2e57e2d..d4b21032c9 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate/main.tf
@@ -18,9 +18,9 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnets = [module.vpc.private_subnets[2]]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
+ fargate_subnet_ids = [module.vpc.private_subnets[2]]
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
@@ -89,7 +89,7 @@ module "eks" {
]
# Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
- subnets = [module.vpc.private_subnets[1]]
+ subnet_ids = [module.vpc.private_subnets[1]]
tags = {
Owner = "secondary"
@@ -115,7 +115,7 @@ module "fargate_profile_existing_cluster" {
source = "../../modules/fargate"
cluster_name = module.eks.cluster_id
- subnets = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
+  subnet_ids = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
fargate_profiles = {
profile1 = {
@@ -153,7 +153,7 @@ module "fargate_profile_existing_cluster" {
]
# Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
- subnets = [module.vpc.private_subnets[0]]
+ subnet_ids = [module.vpc.private_subnets[0]]
tags = {
Owner = "profile2"
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
index 28a1ff65a1..42e46cfc07 100644
--- a/examples/instance_refresh/main.tf
+++ b/examples/instance_refresh/main.tf
@@ -205,8 +205,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
index f9b375cb23..80a931f169 100644
--- a/examples/irsa/main.tf
+++ b/examples/irsa/main.tf
@@ -18,8 +18,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
index 9301416360..d9c02cb328 100644
--- a/examples/launch_templates/main.tf
+++ b/examples/launch_templates/main.tf
@@ -17,7 +17,7 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/launch_templates_with_managed_node_groups/main.tf b/examples/launch_templates_with_managed_node_groups/main.tf
index 4392c606fa..227c034fcb 100644
--- a/examples/launch_templates_with_managed_node_groups/main.tf
+++ b/examples/launch_templates_with_managed_node_groups/main.tf
@@ -18,8 +18,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf
index 56a2b05346..0eb4176696 100644
--- a/examples/managed_node_groups/main.tf
+++ b/examples/managed_node_groups/main.tf
@@ -18,8 +18,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
index 0ddc1dd6a3..45124ca350 100644
--- a/examples/secrets_encryption/main.tf
+++ b/examples/secrets_encryption/main.tf
@@ -18,8 +18,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnets = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/locals.tf b/locals.tf
index 6362f7e360..2aa231415d 100644
--- a/locals.tf
+++ b/locals.tf
@@ -6,10 +6,9 @@ locals {
cluster_name = try(aws_eks_cluster.this[0].name, "")
cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
- cluster_oidc_issuer_url = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc[0].issuer, [""]))[0]
cluster_primary_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
- cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
+ cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
# Worker groups
worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
@@ -20,7 +19,6 @@ locals {
ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
- client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
kubeconfig = var.create ? templatefile("${path.module}/templates/kubeconfig.tpl", {
diff --git a/main.tf b/main.tf
index 85e005ded2..4d0076786e 100644
--- a/main.tf
+++ b/main.tf
@@ -5,13 +5,13 @@ resource "aws_eks_cluster" "this" {
count = var.create ? 1 : 0
name = var.cluster_name
- enabled_cluster_log_types = var.cluster_enabled_log_types
role_arn = try(aws_iam_role.cluster[0].arn, var.cluster_iam_role_arn)
version = var.cluster_version
+ enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
security_group_ids = compact([local.cluster_security_group_id])
- subnet_ids = var.subnets
+ subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
public_access_cidrs = var.cluster_endpoint_public_access_cidrs
@@ -38,9 +38,9 @@ resource "aws_eks_cluster" "this" {
)
timeouts {
- create = var.cluster_create_timeout
- delete = var.cluster_delete_timeout
- update = var.cluster_update_timeout
+ create = lookup(var.cluster_timeouts, "create", null)
+ delete = lookup(var.cluster_timeouts, "update", null)
+ update = lookup(var.cluster_timeouts, "delete", null)
}
depends_on = [
@@ -54,7 +54,7 @@ resource "aws_eks_cluster" "this" {
}
resource "aws_cloudwatch_log_group" "this" {
- count = length(var.cluster_enabled_log_types) > 0 && var.create ? 1 : 0
+ count = var.create && length(var.cluster_enabled_log_types) > 0 ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.cluster_log_retention_in_days
@@ -64,28 +64,34 @@ resource "aws_cloudwatch_log_group" "this" {
}
################################################################################
-# Security Group
+# Cluster Security Group
################################################################################
+locals {
+  cluster_security_group_name = coalesce(var.cluster_security_group_name, var.cluster_name)
+}
+
resource "aws_security_group" "cluster" {
- count = var.cluster_create_security_group && var.create ? 1 : 0
+ count = var.create && var.create_cluster_security_group ? 1 : 0
- name_prefix = var.cluster_name
- description = "EKS cluster security group."
+ name = var.cluster_security_group_use_name_prefix ? null : local.cluster_security_group_name
+ name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_security_group_name}-" : null
+ description = "EKS cluster security group"
vpc_id = var.vpc_id
tags = merge(
var.tags,
{
- "Name" = "${var.cluster_name}-eks_cluster_sg"
+ "Name" = local.cluster_security_group_name
},
+ var.cluster_security_group_tags
)
}
resource "aws_security_group_rule" "cluster_egress_internet" {
- count = var.cluster_create_security_group && var.create ? 1 : 0
+ count = var.create && var.create_cluster_security_group ? 1 : 0
- description = "Allow cluster egress access to the Internet."
+ description = "Allow cluster egress access to the Internet"
protocol = "-1"
security_group_id = local.cluster_security_group_id
cidr_blocks = var.cluster_egress_cidrs
@@ -95,9 +101,9 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
- count = var.cluster_create_security_group && var.create && var.worker_create_security_group ? 1 : 0
+ count = var.create && var.create_cluster_security_group && var.worker_create_security_group ? 1 : 0
- description = "Allow pods to communicate with the EKS cluster API."
+ description = "Allow pods to communicate with the EKS cluster API"
protocol = "tcp"
security_group_id = local.cluster_security_group_id
source_security_group_id = local.worker_security_group_id
@@ -109,7 +115,7 @@ resource "aws_security_group_rule" "cluster_https_worker_ingress" {
resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
for_each = var.create && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
- description = "Allow private K8S API ingress from custom CIDR source."
+ description = "Allow private K8S API ingress from custom CIDR source"
type = "ingress"
from_port = 443
to_port = 443
@@ -122,7 +128,7 @@ resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
resource "aws_security_group_rule" "cluster_private_access_sg_source" {
count = var.create && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
- description = "Allow private K8S API ingress from custom Security Groups source."
+ description = "Allow private K8S API ingress from custom Security Groups source"
type = "ingress"
from_port = 443
to_port = 443
@@ -137,7 +143,7 @@ resource "aws_security_group_rule" "cluster_private_access_sg_source" {
################################################################################
resource "local_file" "kubeconfig" {
- count = var.write_kubeconfig && var.create ? 1 : 0
+ count = var.create && var.write_kubeconfig ? 1 : 0
content = local.kubeconfig
filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
@@ -149,21 +155,18 @@ resource "local_file" "kubeconfig" {
# IRSA
################################################################################
-# Enable IAM Roles for EKS Service-Accounts (IRSA).
-# The Root CA Thumbprint for an OpenID Connect Identity Provider is currently
-# Being passed as a default value which is the same for all regions and
-# Is valid until (Jun 28 17:39:16 2034 GMT).
-# https://crt.sh/?q=9E99A48A9960B14926BB7F3B02E22DA2B0AB7280
-# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
-# https://github.com/terraform-providers/terraform-provider-aws/issues/10104
+data "tls_certificate" "this" {
+ count = var.create && var.enable_irsa ? 1 : 0
+
+  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
+}
-# TODO - update to use TLS data source and drop hacks
resource "aws_iam_openid_connect_provider" "oidc_provider" {
- count = var.enable_irsa && var.create ? 1 : 0
+ count = var.create && var.enable_irsa ? 1 : 0
- client_id_list = local.client_id_list
- thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
- url = local.cluster_oidc_issuer_url
+ client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
+ thumbprint_list = [data.tls_certificate.this[0].certificates[0].sha1_fingerprint]
+  url             = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
tags = merge(
{
@@ -174,7 +177,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
}
################################################################################
-# Cluster IAM Role, Permissions, & Policies
+# Cluster IAM Role
################################################################################
locals {
@@ -182,41 +185,40 @@ locals {
}
resource "aws_iam_role" "cluster" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
+ count = var.create && var.create_cluster_iam_role ? 1 : 0
- name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
- name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
- assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
- permissions_boundary = var.cluster_iam_role_permissions_boundary
- path = var.cluster_iam_role_path
+ name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
+ name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
+ path = var.cluster_iam_role_path
+
+ assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy[0].json
+ permissions_boundary = var.cluster_iam_role_permissions_boundary
+ managed_policy_arns = [
+ "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
+ "${local.policy_arn_prefix}/AmazonEKSServicePolicy",
+ "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
+ ]
force_detach_policies = true
- tags = merge(var.tags, var.cluster_role_iam_tags)
+ tags = merge(var.tags, var.cluster_iam_role_tags)
}
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
-
- role = aws_iam_role.cluster.name
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy"
-}
+data "aws_iam_policy_document" "cluster_assume_role_policy" {
+ count = var.create && var.create_cluster_iam_role ? 1 : 0
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
-
- role = aws_iam_role.cluster.name
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy"
-}
-
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceControllerPolicy" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
+ statement {
+ sid = "EKSClusterAssumeRole"
+ actions = ["sts:AssumeRole"]
- role = aws_iam_role.cluster.name
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController"
+ principals {
+ type = "Service"
+ identifiers = ["eks.amazonaws.com"]
+ }
+ }
}
data "aws_iam_policy_document" "cluster_additional" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
+ count = var.create && var.create_cluster_iam_role ? 1 : 0
# Permissions required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
statement {
@@ -242,7 +244,7 @@ data "aws_iam_policy_document" "cluster_additional" {
}
resource "aws_iam_policy" "cluster_additional" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
+ count = var.create && var.create_cluster_iam_role ? 1 : 0
name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
@@ -253,8 +255,8 @@ resource "aws_iam_policy" "cluster_additional" {
}
resource "aws_iam_role_policy_attachment" "cluster_additional" {
- count = var.create_cluster_iam_role && var.create ? 1 : 0
+ count = var.create && var.create_cluster_iam_role ? 1 : 0
- role = aws_iam_role.cluster.name
+ role = aws_iam_role.cluster[0].name
policy_arn = aws_iam_policy.cluster_additional[0].arn
}
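The `main.tf` changes above drop the pinned `eks_oidc_root_ca_thumbprint` in favour of reading the thumbprint from the issuer certificate via the `tls_certificate` data source. On the consumer side, enabling IRSA stays a small sketch like this (values illustrative):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "my-cluster"
  cluster_version = "1.21"

  # Creates the aws_iam_openid_connect_provider using the certificate
  # thumbprint fetched at plan/apply time instead of a hardcoded value
  enable_irsa = true

  # Optional extra audiences appended to the default STS principal
  openid_connect_audiences = ["example-audience"]
}
```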
diff --git a/modules/fargate/README.md b/modules/fargate/README.md
index 629e3c1fee..0ca65e2dfd 100644
--- a/modules/fargate/README.md
+++ b/modules/fargate/README.md
@@ -58,7 +58,7 @@ No modules.
| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no |
| [iam\_path](#input\_iam\_path) | IAM roles will be created on this path. | `string` | `"/"` | no |
| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| [subnets](#input\_subnets) | A list of subnets for the EKS Fargate profiles. | `list(string)` | `[]` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate profiles. | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. | `map(string)` | `{}` | no |
## Outputs
diff --git a/modules/fargate/main.tf b/modules/fargate/main.tf
index 33caaaa060..9bc99e9a79 100644
--- a/modules/fargate/main.tf
+++ b/modules/fargate/main.tf
@@ -52,7 +52,7 @@ resource "aws_eks_fargate_profile" "this" {
cluster_name = var.cluster_name
fargate_profile_name = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-")))
pod_execution_role_arn = local.pod_execution_role_arn
- subnet_ids = lookup(each.value, "subnets", var.subnets)
+ subnet_ids = lookup(each.value, "subnet_ids", var.subnet_ids)
dynamic "selector" {
for_each = each.value.selectors
diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf
index fe9cf83303..b836447099 100644
--- a/modules/fargate/variables.tf
+++ b/modules/fargate/variables.tf
@@ -40,8 +40,8 @@ variable "permissions_boundary" {
default = null
}
-variable "subnets" {
- description = "A list of subnets for the EKS Fargate profiles."
+variable "subnet_ids" {
+ description = "A list of subnet IDs for the EKS Fargate profiles."
type = list(string)
default = []
}
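Given the `subnet_ids = lookup(each.value, "subnet_ids", var.subnet_ids)` fallback above, a sketch of a `fargate_profiles` map under the renamed key (names/values illustrative):

```hcl
fargate_profiles = {
  default = {
    name = "default"
    selectors = [
      { namespace = "default" }
    ]

    # Optional per-profile override; omit to fall back to var.subnet_ids
    subnet_ids = ["subnet-abcde012"]
  }
}
```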
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
index a9b376d796..5b9a017421 100644
--- a/modules/node_groups/locals.tf
+++ b/modules/node_groups/locals.tf
@@ -11,7 +11,7 @@ locals {
set_instance_types_on_lt = false
max_capacity = var.node_default_settings["asg_max_size"]
min_capacity = var.node_default_settings["asg_min_size"]
- subnets = var.node_default_settings["subnets"]
+ subnet_ids = var.node_default_settings["subnet_ids"]
create_launch_template = false
bootstrap_env = {}
kubelet_extra_args = var.node_default_settings["kubelet_extra_args"]
diff --git a/modules/node_groups/main.tf b/modules/node_groups/main.tf
index 75e6209730..8d68756c06 100644
--- a/modules/node_groups/main.tf
+++ b/modules/node_groups/main.tf
@@ -6,7 +6,7 @@ resource "aws_eks_node_group" "workers" {
cluster_name = var.cluster_name
node_role_arn = each.value["iam_role_arn"]
- subnet_ids = each.value["subnets"]
+ subnet_ids = each.value["subnet_ids"]
scaling_config {
desired_size = each.value["desired_capacity"]
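Likewise for managed node groups, each group now carries `subnet_ids` instead of `subnets`; a minimal sketch of the per-group map (keys taken from the locals above; values illustrative):

```hcl
node_groups = {
  example = {
    desired_capacity = 1
    min_capacity     = 1
    max_capacity     = 3

    # Overrides the `subnet_ids` inherited from node group defaults
    subnet_ids = ["subnet-abcde012"]
  }
}
```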
diff --git a/variables.tf b/variables.tf
index 3b7bc9c364..564a25c12e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -20,7 +20,17 @@ variable "cluster_name" {
default = ""
}
-### ^ADDED^
+variable "cluster_iam_role_arn" {
+ description = "Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false`"
+ type = string
+ default = null
+}
+
+variable "cluster_version" {
+ description = "Kubernetes minor version to use for the EKS cluster (for example 1.21)."
+ type = string
+ default = null
+}
variable "cluster_enabled_log_types" {
description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
@@ -28,32 +38,120 @@ variable "cluster_enabled_log_types" {
default = []
}
-variable "cluster_log_kms_key_id" {
- description = "If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)"
+variable "cluster_security_group_id" {
+ description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers"
type = string
default = ""
}
+# TODO - split out cluster subnets vs workers
+variable "subnet_ids" {
+ description = "A list of subnet IDs to place the EKS cluster and workers within"
+ type = list(string)
+ default = []
+}
+
+variable "cluster_endpoint_private_access" {
+ description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled."
+ type = bool
+ default = false
+}
+
+variable "cluster_endpoint_public_access" {
+ description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`."
+ type = bool
+ default = true
+}
+
+variable "cluster_endpoint_public_access_cidrs" {
+ description = "List of CIDR blocks which can access the Amazon EKS public API server endpoint."
+ type = list(string)
+ default = ["0.0.0.0/0"]
+}
+
+variable "cluster_service_ipv4_cidr" {
+ description = "service ipv4 cidr for the kubernetes cluster"
+ type = string
+ default = null
+}
+
+variable "cluster_encryption_config" {
+ description = "Configuration block with encryption configuration for the cluster. See examples/secrets_encryption/main.tf for example format"
+ type = list(object({
+ provider_key_arn = string
+ resources = list(string)
+ }))
+ default = []
+}
+
+variable "cluster_tags" {
+ description = "A map of additional tags to add to the cluster"
+ type = map(string)
+ default = {}
+}
+
+variable "cluster_timeouts" {
+ description = "Create, update, and delete timeout configurations for the cluster"
+ type = map(string)
+ default = {}
+}
+
variable "cluster_log_retention_in_days" {
- description = "Number of days to retain log events. Default retention - 90 days."
+ description = "Number of days to retain log events. Default retention - 90 days"
type = number
default = 90
}
+variable "cluster_log_kms_key_id" {
+ description = "If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)"
+ type = string
+ default = ""
+}
+
+################################################################################
+# Cluster Security Group
+################################################################################
+variable "create_cluster_security_group" {
+ description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`."
+ type = bool
+ default = true
+}
-variable "cluster_security_group_id" {
- description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers"
+variable "vpc_id" {
+ description = "ID of the VPC where the cluster and workers will be provisioned"
type = string
- default = ""
+ default = null
}
-variable "cluster_version" {
- description = "Kubernetes minor version to use for the EKS cluster (for example 1.21)."
+variable "cluster_security_group_name" {
+ description = "Name to use on cluster role created"
type = string
default = null
}
+variable "cluster_security_group_use_name_prefix" {
+ description = "Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix"
+ type = string
+ default = true
+}
+
+variable "cluster_security_group_tags" {
+ description = "A map of additional tags to add to the cluster security group created"
+ type = map(string)
+ default = {}
+}
+
+################################################################################
+# Kubeconfig
+################################################################################
+
+variable "write_kubeconfig" {
+ description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`."
+ type = bool
+ default = true
+}
+
variable "kubeconfig_output_path" {
description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`."
type = string
@@ -66,12 +164,78 @@ variable "kubeconfig_file_permission" {
default = "0600"
}
-variable "write_kubeconfig" {
- description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`."
+################################################################################
+# IRSA
+################################################################################
+
+variable "enable_irsa" {
+ description = "Whether to create OpenID Connect Provider for EKS to enable IRSA"
+ type = bool
+ default = false
+}
+
+variable "openid_connect_audiences" {
+ description = "List of OpenID Connect audience client IDs to add to the IRSA provider."
+ type = list(string)
+ default = []
+}
+
+################################################################################
+# Cluster IAM Role
+################################################################################
+
+variable "create_cluster_iam_role" {
+ description = "Determines whether a cluster IAM role is created or to use an existing IAM role"
type = bool
default = true
}
+variable "cluster_iam_role_name" {
+ description = "Name to use on cluster role created"
+ type = string
+ default = null
+}
+
+variable "cluster_iam_role_use_name_prefix" {
+ description = "Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "cluster_iam_role_path" {
+ description = "Cluster IAM role path"
+ type = string
+ default = null
+}
+
+variable "cluster_iam_role_permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the cluster role"
+ type = string
+ default = null
+}
+
+variable "cluster_iam_role_tags" {
+ description = "A map of additional tags to add to the cluster IAM role created"
+ type = map(string)
+ default = {}
+}
+
variable "default_platform" {
description = "Default platform name. Valid options are `linux` and `windows`."
type = string
@@ -128,31 +292,14 @@ variable "iam_instance_profiles" {
default = {}
}
-variable "fargate_subnets" {
- description = "A list of subnets to place fargate workers within (if different from subnets)."
- type = list(string)
- default = []
-}
-
-variable "subnets" {
- description = "A list of subnets to place the EKS cluster and workers within."
+variable "fargate_subnet_ids" {
+ description = "A list of subnet IDs to place fargate workers within (if different from `subnet_ids`)"
type = list(string)
default = []
}
-variable "cluster_tags" {
- description = "A map of tags to add to just the eks resource."
- type = map(string)
- default = {}
-}
-
-variable "vpc_id" {
- description = "VPC where the cluster and workers will be deployed."
- type = string
- default = null
-}
variable "worker_groups" {
description = "A map of maps defining worker group configurations to be defined using AWS Launch Template"
@@ -226,7 +373,7 @@ variable "kubeconfig_aws_authenticator_command" {
}
variable "kubeconfig_aws_authenticator_command_args" {
- description = "Default arguments passed to the authenticator command. Defaults to [token -i ${cluster_name}]."
+ description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]."
type = list(string)
default = []
}
@@ -249,29 +396,7 @@ variable "kubeconfig_name" {
default = ""
}
-variable "cluster_create_timeout" {
- description = "Timeout value when creating the EKS cluster."
- type = string
- default = "30m"
-}
-
-variable "cluster_delete_timeout" {
- description = "Timeout value when deleting the EKS cluster."
- type = string
- default = "15m"
-}
-
-variable "cluster_update_timeout" {
- description = "Timeout value when updating the EKS cluster."
- type = string
- default = "60m"
-}
-variable "cluster_create_security_group" {
- description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`."
- type = bool
- default = true
-}
variable "worker_create_security_group" {
description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`."
@@ -321,23 +446,7 @@ variable "cluster_endpoint_private_access_sg" {
default = null
}
-variable "cluster_endpoint_private_access" {
- description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled."
- type = bool
- default = false
-}
-variable "cluster_endpoint_public_access" {
- description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`."
- type = bool
- default = true
-}
-
-variable "cluster_endpoint_public_access_cidrs" {
- description = "List of CIDR blocks which can access the Amazon EKS public API server endpoint."
- type = list(string)
- default = ["0.0.0.0/0"]
-}
variable "manage_cluster_iam_resources" {
description = "Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified."
@@ -345,29 +454,6 @@ variable "manage_cluster_iam_resources" {
default = true
}
-variable "create_cluster_iam_role" {
- description = "Determines whether a cluster IAM role is created or to use an existing IAM role"
- type = bool
- default = true
-}
-
-variable "cluster_iam_role_arn" {
- description = "Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false`"
- type = string
- default = null
-}
-
-variable "cluster_iam_role_name" {
- description = "Name to use on cluster role created"
- type = string
- default = null
-}
-
-variable "cluster_iam_role_use_name_prefix" {
- description = "Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix"
- type = string
- default = true
-}
variable "manage_worker_iam_resources" {
description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers."
@@ -399,27 +485,6 @@ variable "node_groups" {
default = {}
}
-variable "enable_irsa" {
- description = "Whether to create OpenID Connect Provider for EKS to enable IRSA"
- type = bool
- default = false
-}
-
-variable "eks_oidc_root_ca_thumbprint" {
- type = string
- description = "Thumbprint of Root CA for EKS OIDC, Valid until 2037"
- default = "9e99a48a9960b14926bb7f3b02e22da2b0ab7280"
-}
-
-variable "cluster_encryption_config" {
- description = "Configuration block with encryption configuration for the cluster. See examples/secrets_encryption/main.tf for example format"
- type = list(object({
- provider_key_arn = string
- resources = list(string)
- }))
- default = []
-}
-
variable "fargate_profiles" {
description = "Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details"
type = any
@@ -438,11 +503,7 @@ variable "fargate_pod_execution_role_name" {
default = null
}
-variable "cluster_service_ipv4_cidr" {
- description = "service ipv4 cidr for the kubernetes cluster"
- type = string
- default = null
-}
+
variable "cluster_egress_cidrs" {
description = "List of CIDR blocks that are permitted for cluster egress traffic."
@@ -462,9 +523,4 @@ variable "wait_for_cluster_timeout" {
default = 300
}
-variable "openid_connect_audiences" {
- description = "List of OpenID Connect audience client IDs to add to the IRSA provider."
- type = list(string)
- default = []
-}
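
Taken together, the variables.tf changes above rename the subnet inputs and regroup the security-group, kubeconfig, IRSA, and IAM inputs. A minimal consumer sketch under those assumptions; the registry source and `cluster_name` input are assumed from the module's published interface, and all values are placeholders:

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # assumed registry source

  cluster_name    = "example" # assumed input; not shown in this diff
  cluster_version = "1.21"

  vpc_id             = "vpc-0123456789abcdef0"
  subnet_ids         = ["subnet-aaa", "subnet-bbb"]
  fargate_subnet_ids = ["subnet-ccc"]

  # New cluster security group inputs introduced in this patch
  create_cluster_security_group          = true
  cluster_security_group_name            = "example-cluster"
  cluster_security_group_use_name_prefix = false

  enable_irsa              = true
  openid_connect_audiences = ["sts.amazonaws.com"]
}
```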
diff --git a/versions.tf b/versions.tf
index e9b6bc9562..9ed32587e6 100644
--- a/versions.tf
+++ b/versions.tf
@@ -2,13 +2,29 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.56.0"
- local = ">= 1.4"
- kubernetes = ">= 1.11.1"
- cloudinit = ">= 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.4.0"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 1.11.1"
+ }
+ cloudinit = {
+ source = "hashicorp/cloudinit"
+ version = ">= 2.0.0"
+ }
http = {
source = "terraform-aws-modules/http"
version = ">= 2.4.1"
}
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 2.2.0"
+ }
}
}
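
The move to explicit `source` addresses matters here because the `http` provider comes from the `terraform-aws-modules` namespace: with the shorthand `aws = ">= 3.56.0"` form, Terraform 0.13+ assumes `hashicorp/<name>` for every provider. A sketch of how a consuming root module can mirror the new constraints (minimal, hypothetical):

```hcl
terraform {
  required_version = ">= 0.13.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.56.0"
    }
    # Without an explicit source, Terraform would resolve "http" to
    # hashicorp/http rather than the fork this module requires.
    http = {
      source  = "terraform-aws-modules/http"
      version = ">= 2.4.1"
    }
  }
}
```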
diff --git a/workers.tf b/workers.tf
index aeffc9e82a..00018c5797 100644
--- a/workers.tf
+++ b/workers.tf
@@ -22,7 +22,7 @@ module "fargate" {
fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
permissions_boundary = var.permissions_boundary
iam_path = var.iam_path
- subnets = coalescelist(var.fargate_subnets, var.subnets, [""])
+ subnet_ids = coalescelist(var.fargate_subnet_ids, var.subnet_ids, [""])
fargate_profiles = var.fargate_profiles
From 4fe3dad6020d6f55876ebbf1c1b695300dc89de2 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 8 Nov 2021 12:41:42 -0500
Subject: [PATCH 08/83] refactor: update fargate sub-module
---
README.md | 90 ++++++++++----------
aws_auth.tf | 2 +-
locals.tf | 1 -
modules/fargate/README.md | 27 +++---
modules/fargate/main.tf | 40 +++------
modules/fargate/outputs.tf | 18 +---
modules/fargate/variables.tf | 22 ++---
modules/fargate/versions.tf | 5 +-
variables.tf | 155 +++++++++++++++++++++--------------
workers.tf | 35 ++++----
10 files changed, 203 insertions(+), 192 deletions(-)
diff --git a/README.md b/README.md
index aa75f64d5b..f204e93487 100644
--- a/README.md
+++ b/README.md
@@ -187,17 +187,17 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster. | `bool` | `true` | no |
+| [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
| [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no |
-| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted. | `bool` | `false` | no |
-| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for access to the Amazon EKS private API server endpoint. When `true`, `cluster_endpoint_private_access_cidrs` must be set | `bool` | `false` | no |
+| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | `list(object({ provider_key_arn = string, resources = list(string) }))` | `[]` | no |
-| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled. | `bool` | `false` | no |
-| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
-| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
-| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`. | `bool` | `true` | no |
-| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
+| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `null` | no |
+| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `null` | no |
+| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
+| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
| [cluster\_iam\_role\_path](#input\_cluster\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
@@ -214,57 +214,61 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
-| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21). | `string` | `null` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`. | `bool` | `true` | no |
-| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created. | `bool` | `true` | no |
-| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows`. | `string` | `"linux"` | no |
+| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
+| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `true` | no |
+| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
+| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows` | `string` | `"linux"` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
-| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details | `any` | `{}` | no |
-| [fargate\_subnet\_ids](#input\_fargate\_subnet\_ids) | A list of subnet IDs to place fargate workers within (if different from `subnet_ids`) | `list(string)` | `[]` | no |
+| [fargate\_iam\_role\_path](#input\_fargate\_iam\_role\_path) | Fargate IAM role path | `string` | `null` | no |
+| [fargate\_iam\_role\_permissions\_boundary](#input\_fargate\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the Fargate role | `string` | `null` | no |
+| [fargate\_pod\_execution\_role\_arn](#input\_fargate\_pod\_execution\_role\_arn) | Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false` | `string` | `null` | no |
+| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in Fargate submodule's README.md for more details | `any` | `{}` | no |
+| [fargate\_subnet\_ids](#input\_fargate\_subnet\_ids) | A list of subnet IDs to place Fargate workers within (if different from `subnet_ids`) | `list(string)` | `[]` | no |
+| [fargate\_tags](#input\_fargate\_tags) | A map of additional tags to add to the Fargate resources created | `map(string)` | `{}` | no |
| [group\_default\_settings](#input\_group\_default\_settings) | Override default values for autoscaling group, node group settings | `any` | `{}` | no |
| [iam\_instance\_profiles](#input\_iam\_instance\_profiles) | Map of instance profile definitions to create | `map(any)` | `{}` | no |
-| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path. | `string` | `"/"` | no |
+| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path | `string` | `"/"` | no |
| [kubeconfig\_api\_version](#input\_kubeconfig\_api\_version) | KubeConfig API version. Defaults to client.authentication.k8s.io/v1alpha1 | `string` | `"client.authentication.k8s.io/v1alpha1"` | no |
-| [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"]. | `list(string)` | `[]` | no |
-| [kubeconfig\_aws\_authenticator\_command](#input\_kubeconfig\_aws\_authenticator\_command) | Command to use to fetch AWS EKS credentials. | `string` | `"aws-iam-authenticator"` | no |
-| [kubeconfig\_aws\_authenticator\_command\_args](#input\_kubeconfig\_aws\_authenticator\_command\_args) | Default arguments passed to the authenticator command. Defaults to [token -i $cluster\_name]. | `list(string)` | `[]` | no |
-| [kubeconfig\_aws\_authenticator\_env\_variables](#input\_kubeconfig\_aws\_authenticator\_env\_variables) | Environment variables that should be used when executing the authenticator. e.g. { AWS\_PROFILE = "eks"}. | `map(string)` | `{}` | no |
+| [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"] | `list(string)` | `[]` | no |
+| [kubeconfig\_aws\_authenticator\_command](#input\_kubeconfig\_aws\_authenticator\_command) | Command to use to fetch AWS EKS credentials | `string` | `"aws-iam-authenticator"` | no |
+| [kubeconfig\_aws\_authenticator\_command\_args](#input\_kubeconfig\_aws\_authenticator\_command\_args) | Default arguments passed to the authenticator command. Defaults to [token -i $cluster\_name] | `list(string)` | `[]` | no |
+| [kubeconfig\_aws\_authenticator\_env\_variables](#input\_kubeconfig\_aws\_authenticator\_env\_variables) | Environment variables that should be used when executing the authenticator. e.g. { AWS\_PROFILE = "eks"} | `map(string)` | `{}` | no |
| [kubeconfig\_file\_permission](#input\_kubeconfig\_file\_permission) | File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.` | `string` | `"0600"` | no |
-| [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig. | `string` | `""` | no |
-| [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`. | `string` | `"./"` | no |
+| [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig | `string` | `""` | no |
+| [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/` | `string` | `"./"` | no |
| [launch\_templates](#input\_launch\_templates) | Map of launch template definitions to create | `map(any)` | `{}` | no |
-| [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file. | `bool` | `true` | no |
-| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified. | `bool` | `true` | no |
-| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no |
-| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no |
-| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[]` | no |
+| [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file | `bool` | `true` | no |
+| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified | `bool` | `true` | no |
+| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers | `bool` | `true` | no |
+| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap | `list(string)` | `[]` | no |
+| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[]` | no |
| [node\_groups](#input\_node\_groups) | Map of map of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
-| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider. | `list(string)` | `[]` | no |
-| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
+| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
+| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached | `string` | `null` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
-| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available. | `number` | `300` | no |
+| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available | `number` | `300` | no |
| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
-| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
-| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"amazon"` | no |
-| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"amazon"` | no |
-| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group. | `bool` | `false` | no |
-| [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups. | `bool` | `false` | no |
-| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | `bool` | `true` | no |
+| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
+| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
+| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
+| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
+| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
+| [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups | `bool` | `false` | no |
+| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id` | `bool` | `true` | no |
| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Template | `map(any)` | `{}` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | `number` | `1025` | no |
+| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
+| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443) | `number` | `1025` | no |
| [workers\_additional\_policies](#input\_workers\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
-| [workers\_egress\_cidrs](#input\_workers\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic. | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name. | `string` | `""` | no |
-| [write\_kubeconfig](#input\_write\_kubeconfig) | Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`. | `bool` | `true` | no |
+| [workers\_egress\_cidrs](#input\_workers\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name | `string` | `""` | no |
+| [write\_kubeconfig](#input\_write\_kubeconfig) | Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path` | `bool` | `true` | no |
## Outputs
diff --git a/aws_auth.tf b/aws_auth.tf
index 1510f4e218..2199cb2652 100644
--- a/aws_auth.tf
+++ b/aws_auth.tf
@@ -18,7 +18,7 @@ locals {
for role in concat(
aws_iam_instance_profile.workers.*.role,
module.node_groups.aws_auth_roles,
- module.fargate.aws_auth_roles,
+ [{ worker_role_arn = module.fargate.iam_role_arn, platform = "fargate" }],
) :
{
# Work around https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
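
Since the Fargate submodule no longer exports `aws_auth_roles` (see the outputs.tf diff below), the root module now builds the single Fargate entry inline. Sketched standalone, the entry shape that the mapping loop above consumes:

```hcl
# Shape of the inline entry appended to the aws-auth role list; the key
# names mirror what the role-mapping loop in aws_auth.tf expects.
locals {
  fargate_auth_role = {
    worker_role_arn = module.fargate.iam_role_arn
    platform        = "fargate"
  }
}
```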
diff --git a/locals.tf b/locals.tf
index 2aa231415d..a2ffeb25e7 100644
--- a/locals.tf
+++ b/locals.tf
@@ -3,7 +3,6 @@ locals {
# EKS Cluster
cluster_id = try(aws_eks_cluster.this[0].id, "")
cluster_arn = try(aws_eks_cluster.this[0].arn, "")
- cluster_name = try(aws_eks_cluster.this[0].name, "")
cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
cluster_primary_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
diff --git a/modules/fargate/README.md b/modules/fargate/README.md
index 0ca65e2dfd..27f3c258b8 100644
--- a/modules/fargate/README.md
+++ b/modules/fargate/README.md
@@ -24,13 +24,13 @@ See example code in `examples/fargate`.
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.40.0 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.40.0 |
+| [aws](#provider\_aws) | >= 3.56.0 |
## Modules
@@ -42,32 +42,29 @@ No modules.
|------|------|
| [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource |
| [aws_iam_role.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_policy_document.eks_fargate_pod_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_role.custom_fargate_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. | `string` | `""` | no |
-| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created. | `bool` | `true` | no |
-| [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
+| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the IAM Role that provides permissions for the EKS Fargate Profile should be created | `bool` | `true` | no |
+| [fargate\_pod\_execution\_role\_arn](#input\_fargate\_pod\_execution\_role\_arn) | Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false` | `string` | `null` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no |
-| [iam\_path](#input\_iam\_path) | IAM roles will be created on this path. | `string` | `"/"` | no |
-| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate profiles. | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources. | `map(string)` | `{}` | no |
+| [iam\_path](#input\_iam\_path) | Path to the role | `string` | `null` | no |
+| [permissions\_boundary](#input\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the role | `string` | `null` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate profiles | `list(string)` | `[]` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
## Outputs
| Name | Description |
|------|-------------|
-| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| [iam\_role\_arn](#output\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
-| [iam\_role\_name](#output\_iam\_role\_name) | IAM role name for EKS Fargate pods |
+| [iam\_role\_arn](#output\_iam\_role\_arn) | ARN of IAM role of the EKS Fargate pods |
+| [iam\_role\_name](#output\_iam\_role\_name) | Name of IAM role created for EKS Fargate pods |
diff --git a/modules/fargate/main.tf b/modules/fargate/main.tf
index 9bc99e9a79..565a0d93e8 100644
--- a/modules/fargate/main.tf
+++ b/modules/fargate/main.tf
@@ -1,16 +1,7 @@
-locals {
- create = var.create && length(var.fargate_profiles) > 0
-
- pod_execution_role_arn = coalescelist(aws_iam_role.eks_fargate_pod.*.arn, data.aws_iam_role.custom_fargate_iam_role.*.arn, [""])[0]
- pod_execution_role_name = coalescelist(aws_iam_role.eks_fargate_pod.*.name, data.aws_iam_role.custom_fargate_iam_role.*.name, [""])[0]
-
- fargate_profiles = { for k, v in var.fargate_profiles : k => v if var.create }
-}
-
data "aws_partition" "current" {}
data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
- count = local.create && var.create_fargate_pod_execution_role ? 1 : 0
+ count = var.create && var.create_fargate_pod_execution_role ? 1 : 0
statement {
effect = "Allow"
@@ -23,35 +14,28 @@ data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
}
}
-data "aws_iam_role" "custom_fargate_iam_role" {
- count = local.create && !var.create_fargate_pod_execution_role ? 1 : 0
-
- name = var.fargate_pod_execution_role_name
-}
-
resource "aws_iam_role" "eks_fargate_pod" {
- count = local.create && var.create_fargate_pod_execution_role ? 1 : 0
+ count = var.create && var.create_fargate_pod_execution_role ? 1 : 0
+
+ name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24))
+ path = var.iam_path
- name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24))
assume_role_policy = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json
permissions_boundary = var.permissions_boundary
- tags = var.tags
- path = var.iam_path
-}
-
-resource "aws_iam_role_policy_attachment" "eks_fargate_pod" {
- count = local.create && var.create_fargate_pod_execution_role ? 1 : 0
+ managed_policy_arns = [
+ "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy",
+ ]
+ force_detach_policies = true
- policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
- role = aws_iam_role.eks_fargate_pod[0].name
+ tags = var.tags
}
resource "aws_eks_fargate_profile" "this" {
- for_each = local.fargate_profiles
+ for_each = var.create ? var.fargate_profiles : {}
cluster_name = var.cluster_name
fargate_profile_name = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-")))
- pod_execution_role_arn = local.pod_execution_role_arn
+ pod_execution_role_arn = var.create_fargate_pod_execution_role ? aws_iam_role.eks_fargate_pod[0].arn : var.fargate_pod_execution_role_arn
subnet_ids = lookup(each.value, "subnet_ids", var.subnet_ids)
dynamic "selector" {
diff --git a/modules/fargate/outputs.tf b/modules/fargate/outputs.tf
index 0535d71de3..7ecfeebf47 100644
--- a/modules/fargate/outputs.tf
+++ b/modules/fargate/outputs.tf
@@ -9,21 +9,11 @@ output "fargate_profile_arns" {
}
output "iam_role_name" {
- description = "IAM role name for EKS Fargate pods"
- value = local.pod_execution_role_name
+ description = "Name of IAM role created for EKS Fargate pods"
+ value = try(aws_iam_role.eks_fargate_pod[0].name, "")
}
output "iam_role_arn" {
- description = "IAM role ARN for EKS Fargate pods"
- value = local.pod_execution_role_arn
-}
-
-output "aws_auth_roles" {
- description = "Roles for use in aws-auth ConfigMap"
- value = [
- for i in range(1) : {
- worker_role_arn = local.pod_execution_role_arn
- platform = "fargate"
- } if local.create
- ]
+ description = "ARN of IAM role of the EKS Fargate pods"
+ value = try(aws_iam_role.eks_fargate_pod[0].arn, var.fargate_pod_execution_role_arn, "")
}
diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf
index b836447099..86fefbb39d 100644
--- a/modules/fargate/variables.tf
+++ b/modules/fargate/variables.tf
@@ -1,29 +1,29 @@
variable "create" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
+ description = "Controls if Fargate resources should be created (it affects all resources)"
type = bool
default = true
}
variable "create_fargate_pod_execution_role" {
- description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created."
+ description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created"
type = bool
default = true
}
variable "cluster_name" {
- description = "Name of the EKS cluster."
+ description = "Name of the EKS cluster"
type = string
default = ""
}
variable "iam_path" {
- description = "IAM roles will be created on this path."
+ description = "Path to the role"
type = string
- default = "/"
+ default = null
}
-variable "fargate_pod_execution_role_name" {
- description = "The IAM Role that provides permissions for the EKS Fargate Profile."
+variable "permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the role"
type = string
default = null
}
@@ -34,20 +34,20 @@ variable "fargate_profiles" {
default = {}
}
-variable "permissions_boundary" {
- description = "If provided, all IAM roles will be created with this permissions boundary attached."
+variable "fargate_pod_execution_role_arn" {
+ description = "Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false`"
type = string
default = null
}
variable "subnet_ids" {
- description = "A list of subnet IDs for the EKS Fargate profiles."
+ description = "A list of subnet IDs for the EKS Fargate profiles"
type = list(string)
default = []
}
variable "tags" {
- description = "A map of tags to add to all resources."
+ description = "A map of tags to add to all resources"
type = map(string)
default = {}
}
diff --git a/modules/fargate/versions.tf b/modules/fargate/versions.tf
index 2051547e51..97955e9bc8 100644
--- a/modules/fargate/versions.tf
+++ b/modules/fargate/versions.tf
@@ -2,6 +2,9 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.40.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
}
}
diff --git a/variables.tf b/variables.tf
index 564a25c12e..f9ecb45180 100644
--- a/variables.tf
+++ b/variables.tf
@@ -27,7 +27,7 @@ variable "cluster_iam_role_arn" {
}
variable "cluster_version" {
- description = "Kubernetes minor version to use for the EKS cluster (for example 1.21)."
+ description = "Kubernetes minor version to use for the EKS cluster (for example 1.21)"
type = string
default = null
}
@@ -52,19 +52,19 @@ variable "subnet_ids" {
}
variable "cluster_endpoint_private_access" {
- description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled."
+ description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled"
type = bool
default = false
}
variable "cluster_endpoint_public_access" {
- description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`."
+ description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`"
type = bool
default = true
}
variable "cluster_endpoint_public_access_cidrs" {
- description = "List of CIDR blocks which can access the Amazon EKS public API server endpoint."
+ description = "List of CIDR blocks which can access the Amazon EKS public API server endpoint"
type = list(string)
default = ["0.0.0.0/0"]
}
@@ -113,7 +113,7 @@ variable "cluster_log_kms_key_id" {
################################################################################
variable "create_cluster_security_group" {
- description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`."
+ description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`"
type = bool
default = true
}
@@ -147,13 +147,13 @@ variable "cluster_security_group_tags" {
################################################################################
variable "write_kubeconfig" {
- description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`."
+ description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`"
type = bool
default = true
}
variable "kubeconfig_output_path" {
- description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`."
+ description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`"
type = string
default = "./"
}
@@ -175,7 +175,7 @@ variable "enable_irsa" {
}
variable "openid_connect_audiences" {
- description = "List of OpenID Connect audience client IDs to add to the IRSA provider."
+ description = "List of OpenID Connect audience client IDs to add to the IRSA provider"
type = list(string)
default = []
}
@@ -220,7 +220,61 @@ variable "cluster_iam_role_tags" {
default = {}
}
+################################################################################
+# Fargate
+################################################################################
+variable "create_fargate" {
+ description = "Determines whether Fargate resources are created"
+ type = bool
+ default = true
+}
+
+variable "create_fargate_pod_execution_role" {
+ description = "Controls if the EKS Fargate pod execution IAM role should be created"
+ type = bool
+ default = true
+}
+
+variable "fargate_pod_execution_role_arn" {
+ description = "Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false`"
+ type = string
+ default = null
+}
+
+variable "fargate_subnet_ids" {
+ description = "A list of subnet IDs to place Fargate workers within (if different from `subnet_ids`)"
+ type = list(string)
+ default = []
+}
+
+variable "fargate_iam_role_path" {
+ description = "Fargate IAM role path"
+ type = string
+ default = null
+}
+
+variable "fargate_iam_role_permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the Fargate role"
+ type = string
+ default = null
+}
+
+variable "fargate_profiles" {
+ description = "Fargate profiles to create. See `fargate_profile` keys section in Fargate submodule's README.md for more details"
+ type = any
+ default = {}
+}
+
+variable "fargate_tags" {
+ description = "A map of additional tags to add to the Fargate resources created"
+ type = map(string)
+ default = {}
+}
+
+################################################################################
+# Node Groups
+################################################################################
@@ -237,13 +291,13 @@ variable "cluster_iam_role_tags" {
variable "default_platform" {
- description = "Default platform name. Valid options are `linux` and `windows`."
+ description = "Default platform name. Valid options are `linux` and `windows`"
type = string
default = "linux"
}
variable "manage_aws_auth" {
- description = "Whether to apply the aws-auth configmap file."
+ description = "Whether to apply the aws-auth configmap file"
type = bool
default = true
}
@@ -255,13 +309,13 @@ variable "aws_auth_additional_labels" {
}
variable "map_accounts" {
- description = "Additional AWS account numbers to add to the aws-auth configmap."
+ description = "Additional AWS account numbers to add to the aws-auth configmap"
type = list(string)
default = []
}
variable "map_roles" {
- description = "Additional IAM roles to add to the aws-auth configmap."
+ description = "Additional IAM roles to add to the aws-auth configmap"
type = list(object({
rolearn = string
username = string
@@ -271,7 +325,7 @@ variable "map_roles" {
}
variable "map_users" {
- description = "Additional IAM users to add to the aws-auth configmap."
+ description = "Additional IAM users to add to the aws-auth configmap"
type = list(object({
userarn = string
username = string
@@ -292,11 +346,7 @@ variable "iam_instance_profiles" {
default = {}
}
-variable "fargate_subnet_ids" {
- description = "A list of subnet IDs to place fargate workers within (if different from `subnet_ids`)"
- type = list(string)
- default = []
-}
+
@@ -314,31 +364,31 @@ variable "group_default_settings" {
}
variable "worker_security_group_id" {
- description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
+ description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster"
type = string
default = ""
}
variable "worker_ami_name_filter" {
- description = "Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used."
+ description = "Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used"
type = string
default = ""
}
variable "worker_ami_name_filter_windows" {
- description = "Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used."
+ description = "Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used"
type = string
default = ""
}
variable "worker_ami_owner_id" {
- description = "The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')."
+ description = "The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')"
type = string
default = "amazon"
}
variable "worker_ami_owner_id_windows" {
- description = "The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')."
+ description = "The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')"
type = string
default = "amazon"
}
@@ -350,7 +400,7 @@ variable "worker_additional_security_group_ids" {
}
variable "worker_sg_ingress_from_port" {
- description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)."
+ description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)"
type = number
default = 1025
}
@@ -367,31 +417,31 @@ variable "kubeconfig_api_version" {
}
variable "kubeconfig_aws_authenticator_command" {
- description = "Command to use to fetch AWS EKS credentials."
+ description = "Command to use to fetch AWS EKS credentials"
type = string
default = "aws-iam-authenticator"
}
variable "kubeconfig_aws_authenticator_command_args" {
- description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]."
+ description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]"
type = list(string)
default = []
}
variable "kubeconfig_aws_authenticator_additional_args" {
- description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]."
+ description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]"
type = list(string)
default = []
}
variable "kubeconfig_aws_authenticator_env_variables" {
- description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}."
+ description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}"
type = map(string)
default = {}
}
variable "kubeconfig_name" {
- description = "Override the default name used for items kubeconfig."
+ description = "Override the default name used for items kubeconfig"
type = string
default = ""
}
@@ -399,49 +449,49 @@ variable "kubeconfig_name" {
variable "worker_create_security_group" {
- description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`."
+ description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`"
type = bool
default = true
}
variable "worker_create_initial_lifecycle_hooks" {
- description = "Whether to create initial lifecycle hooks provided in worker groups."
+ description = "Whether to create initial lifecycle hooks provided in worker groups"
type = bool
default = false
}
variable "worker_create_cluster_primary_security_group_rules" {
- description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group."
+ description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group"
type = bool
default = false
}
variable "permissions_boundary" {
- description = "If provided, all IAM roles will be created with this permissions boundary attached."
+ description = "If provided, all IAM roles will be created with this permissions boundary attached"
type = string
default = null
}
variable "iam_path" {
- description = "If provided, all IAM roles will be created on this path."
+ description = "If provided, all IAM roles will be created on this path"
type = string
default = "/"
}
variable "cluster_create_endpoint_private_access_sg_rule" {
- description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted."
+ description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted"
type = bool
default = false
}
variable "cluster_endpoint_private_access_cidrs" {
- description = "List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`."
+ description = "List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
type = list(string)
default = null
}
variable "cluster_endpoint_private_access_sg" {
- description = "List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`."
+ description = "List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
type = list(string)
default = null
}
@@ -449,26 +499,26 @@ variable "cluster_endpoint_private_access_sg" {
variable "manage_cluster_iam_resources" {
- description = "Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified."
+ description = "Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified"
type = bool
default = true
}
variable "manage_worker_iam_resources" {
- description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers."
+ description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers"
type = bool
default = true
}
variable "workers_role_name" {
- description = "User defined workers role name."
+ description = "User defined workers role name"
type = string
default = ""
}
variable "attach_worker_cni_policy" {
- description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster."
+ description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
type = bool
default = true
}
@@ -485,40 +535,23 @@ variable "node_groups" {
default = {}
}
-variable "fargate_profiles" {
- description = "Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details"
- type = any
- default = {}
-}
-
-variable "create_fargate_pod_execution_role" {
- description = "Controls if the EKS Fargate pod execution IAM role should be created."
- type = bool
- default = true
-}
-
-variable "fargate_pod_execution_role_name" {
- description = "The IAM Role that provides permissions for the EKS Fargate Profile."
- type = string
- default = null
-}
variable "cluster_egress_cidrs" {
- description = "List of CIDR blocks that are permitted for cluster egress traffic."
+ description = "List of CIDR blocks that are permitted for cluster egress traffic"
type = list(string)
default = ["0.0.0.0/0"]
}
variable "workers_egress_cidrs" {
- description = "List of CIDR blocks that are permitted for workers egress traffic."
+ description = "List of CIDR blocks that are permitted for workers egress traffic"
type = list(string)
default = ["0.0.0.0/0"]
}
variable "wait_for_cluster_timeout" {
- description = "A timeout (in seconds) to wait for cluster to be available."
+ description = "A timeout (in seconds) to wait for cluster to be available"
type = number
default = 300
}
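
The three `cluster_endpoint_private_access*` inputs above only take effect together. A minimal sketch of wiring them from a consumer configuration; all IDs and names are placeholders:

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name = "example"                      # placeholder
  vpc_id       = "vpc-12345678"                 # placeholder
  subnet_ids   = ["subnet-aaaa", "subnet-bbbb"] # placeholders

  # All three must be set for the private endpoint rules to be created.
  cluster_endpoint_private_access                = true
  cluster_create_endpoint_private_access_sg_rule = true
  cluster_endpoint_private_access_cidrs          = ["10.0.0.0/8"]
  cluster_endpoint_private_access_sg             = ["sg-0123456789abcdef0"] # placeholder
}
```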
diff --git a/workers.tf b/workers.tf
index 00018c5797..6a2f69e9f2 100644
--- a/workers.tf
+++ b/workers.tf
@@ -15,18 +15,19 @@ locals {
module "fargate" {
source = "./modules/fargate"
- create = var.create
+ create = var.create_fargate
create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
+ fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
- cluster_name = local.cluster_name
- fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
- permissions_boundary = var.permissions_boundary
- iam_path = var.iam_path
- subnet_ids = coalescelist(var.fargate_subnet_ids, var.subnet_ids, [""])
+ cluster_name = aws_eks_cluster.this[0].name
+ subnet_ids = coalescelist(var.fargate_subnet_ids, var.subnet_ids, [""])
+
+ iam_path = var.fargate_iam_role_path
+ permissions_boundary = var.fargate_iam_role_permissions_boundary
fargate_profiles = var.fargate_profiles
- tags = var.tags
+ tags = merge(var.tags, var.fargate_tags)
}
################################################################################
@@ -38,7 +39,7 @@ module "node_groups" {
create = var.create
- cluster_name = local.cluster_name
+ cluster_name = aws_eks_cluster.this[0].name
cluster_endpoint = local.cluster_endpoint
cluster_auth_base64 = local.cluster_auth_base64
@@ -66,7 +67,7 @@ module "node_groups" {
resource "aws_autoscaling_group" "this" {
for_each = var.create ? var.worker_groups : {}
- name_prefix = "${join("-", [local.cluster_name, try(each.value.name, each.key)])}-"
+ name_prefix = "${join("-", [aws_eks_cluster.this[0].name, try(each.value.name, each.key)])}-"
launch_template {
name = each.value.launch_template_key # required
@@ -189,11 +190,11 @@ resource "aws_autoscaling_group" "this" {
[
{
"key" = "Name"
- "value" = "${join("-", [local.cluster_name, lookup(each.value, "name", each.key)])}-eks-asg"
+ "value" = "${join("-", [aws_eks_cluster.this[0].name, lookup(each.value, "name", each.key)])}-eks-asg"
"propagate_at_launch" = true
},
{
- "key" = "kubernetes.io/cluster/${local.cluster_name}"
+ "key" = "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}"
"value" = "owned"
"propagate_at_launch" = true
},
@@ -225,7 +226,7 @@ resource "aws_autoscaling_group" "this" {
resource "aws_launch_template" "this" {
for_each = var.create ? var.launch_templates : {}
- name_prefix = "${local.cluster_name}-${try(each.value.name, each.key)}"
+ name_prefix = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}"
description = try(each.value.description, var.group_default_settings.description, null)
ebs_optimized = try(each.value.ebs_optimized, var.group_default_settings.ebs_optimized, null)
@@ -414,7 +415,7 @@ resource "aws_launch_template" "this" {
tag_specifications {
resource_type = "volume"
tags = merge(
- { "Name" = "${local.cluster_name}-${try(each.value.name, each.key)}-eks_asg" },
+ { "Name" = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}-eks_asg" },
var.tags,
{ for tag in lookup(each.value, "tags", {}) : tag["key"] => tag["value"] if tag["key"] != "Name" && tag["propagate_at_launch"] }
)
@@ -423,7 +424,7 @@ resource "aws_launch_template" "this" {
tag_specifications {
resource_type = "instance"
tags = merge(
- { "Name" = "${local.cluster_name}-${try(each.value.name, each.key)}-eks_asg" },
+ { "Name" = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}-eks_asg" },
{ for tag_key, tag_value in var.tags :
tag_key => tag_value
if tag_key != "Name" && !contains([for tag in lookup(each.value, "tags", {}) : tag["key"]], tag_key)
@@ -434,7 +435,7 @@ resource "aws_launch_template" "this" {
tag_specifications {
resource_type = "network-interface"
tags = merge(
- { "Name" = "${local.cluster_name}-${try(each.value.name, each.key)}-eks_asg" },
+ { "Name" = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}-eks_asg" },
var.tags,
{ for tag in lookup(each.value, "tags", {}) : tag["key"] => tag["value"] if tag["key"] != "Name" && tag["propagate_at_launch"] }
)
@@ -470,7 +471,7 @@ resource "aws_launch_template" "this" {
resource "aws_iam_role" "workers" {
count = var.manage_worker_iam_resources && var.create ? 1 : 0
- name_prefix = var.workers_role_name != "" ? null : local.cluster_name
+ name_prefix = var.workers_role_name != "" ? null : aws_eks_cluster.this[0].name
name = var.workers_role_name != "" ? var.workers_role_name : null
assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
permissions_boundary = var.permissions_boundary
@@ -483,7 +484,7 @@ resource "aws_iam_role" "workers" {
resource "aws_iam_instance_profile" "workers" {
count = var.create && var.manage_worker_iam_resources ? 1 : 0
- name_prefix = local.cluster_name
+ name_prefix = aws_eks_cluster.this[0].name
role = aws_iam_role.workers[0].id
path = var.iam_path
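
The recurring substitution in workers.tf, `local.cluster_name` replaced by `aws_eks_cluster.this[0].name`, also changes the dependency graph: reading the resource attribute gives each worker resource an implicit dependency on cluster creation. A minimal sketch of the pattern, reusing the resource names from the hunk above:

```hcl
resource "aws_iam_instance_profile" "workers" {
  count = var.create && var.manage_worker_iam_resources ? 1 : 0

  # Referencing the cluster attribute (rather than a local or an input
  # variable) makes Terraform create aws_eks_cluster.this[0] before this
  # profile, with no explicit depends_on required.
  name_prefix = aws_eks_cluster.this[0].name
  role        = aws_iam_role.workers[0].id
  path        = var.iam_path
}
```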
From 7c24527293b1568ed98196d00e768bb6a368c1e6 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 8 Nov 2021 15:37:13 -0500
Subject: [PATCH 09/83] refactor: remove aws-auth in favor of EKS provided auth
via API/CLI, move node group back to root
---
README.md | 26 +-
aws_auth.tf | 69 ---
data.tf | 14 -
docs/faq.md | 27 +-
examples/bottlerocket/README.md | 3 -
examples/bottlerocket/main.tf | 3 -
examples/bottlerocket/outputs.tf | 18 +-
examples/complete/README.md | 3 -
examples/complete/main.tf | 33 --
examples/complete/outputs.tf | 10 -
examples/fargate/README.md | 2 -
examples/fargate/main.tf | 3 -
examples/fargate/outputs.tf | 10 -
examples/instance_refresh/README.md | 2 -
examples/instance_refresh/outputs.tf | 10 -
examples/launch_templates/README.md | 2 -
examples/launch_templates/outputs.tf | 10 -
.../README.md | 2 -
.../outputs.tf | 10 -
examples/managed_node_groups/README.md | 9 +-
examples/managed_node_groups/main.tf | 4 -
examples/managed_node_groups/outputs.tf | 18 +-
examples/managed_node_groups/variables.tf | 48 --
examples/secrets_encryption/README.md | 2 -
examples/secrets_encryption/outputs.tf | 10 -
locals.tf | 4 +-
main.tf | 24 +-
modules/node_groups/README.md | 112 ----
modules/node_groups/launch_template.tf | 146 ------
modules/node_groups/locals.tf | 51 --
modules/node_groups/main.tf | 105 ----
modules/node_groups/outputs.tf | 14 -
modules/node_groups/templates/userdata.sh.tpl | 34 --
modules/node_groups/variables.tf | 65 ---
modules/node_groups/versions.tf | 8 -
outputs.tf | 50 +-
variables.tf | 115 +---
workers.tf | 491 ++++++++++--------
38 files changed, 310 insertions(+), 1257 deletions(-)
delete mode 100644 aws_auth.tf
delete mode 100644 modules/node_groups/README.md
delete mode 100644 modules/node_groups/launch_template.tf
delete mode 100644 modules/node_groups/locals.tf
delete mode 100644 modules/node_groups/main.tf
delete mode 100644 modules/node_groups/outputs.tf
delete mode 100644 modules/node_groups/templates/userdata.sh.tpl
delete mode 100644 modules/node_groups/variables.tf
delete mode 100644 modules/node_groups/versions.tf
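
With kubeconfig generation and `aws-auth` management removed, credentials come straight from the EKS API. A minimal sketch of configuring the kubernetes provider that way; the `cluster_id` and `cluster_certificate_authority_data` output names are assumptions, not confirmed by this patch:

```hcl
# Token-based auth against the cluster, replacing the generated kubeconfig.
data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id # assumed output name
}

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) # assumed output name
  token                  = data.aws_eks_cluster_auth.this.token
}
```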
diff --git a/README.md b/README.md
index f204e93487..2371dd99da 100644
--- a/README.md
+++ b/README.md
@@ -128,9 +128,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
-| [http](#provider\_http) | >= 2.4.1 |
-| [kubernetes](#provider\_kubernetes) | >= 1.11.1 |
-| [local](#provider\_local) | >= 1.4.0 |
| [tls](#provider\_tls) | >= 2.2.0 |
## Modules
@@ -138,7 +135,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Source | Version |
|------|--------|---------|
| [fargate](#module\_fargate) | ./modules/fargate | n/a |
-| [node\_groups](#module\_node\_groups) | ./modules/node_groups | n/a |
## Resources
@@ -171,8 +167,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_security_group_rule.workers_ingress_cluster_kubelet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.workers_ingress_cluster_primary](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.workers_ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
-| [local_file.kubeconfig](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_worker_windows](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
@@ -180,7 +174,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.workers_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [http_http.wait_for_cluster](https://registry.terraform.io/providers/terraform-aws-modules/http/latest/docs/data-sources/http) | data source |
| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
## Inputs
@@ -188,7 +181,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set to `false`, the permissions must be assigned to the `aws-node` DaemonSet pods via another method, or nodes will not be able to join the cluster | `bool` | `true` | no |
-| [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no |
| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for access to the Amazon EKS private API server endpoint. When set to `true`, `cluster_endpoint_private_access_cidrs` must be set | `bool` | `false` | no |
| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
@@ -218,7 +210,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
-| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `true` | no |
+| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `false` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows` | `string` | `"linux"` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
@@ -229,23 +221,15 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [fargate\_subnet\_ids](#input\_fargate\_subnet\_ids) | A list of subnet IDs to place Fargate workers within (if different from `subnet_ids`) | `list(string)` | `[]` | no |
| [fargate\_tags](#input\_fargate\_tags) | A map of additional tags to add to the Fargate resources created | `map(string)` | `{}` | no |
| [group\_default\_settings](#input\_group\_default\_settings) | Override default values for autoscaling group, node group settings | `any` | `{}` | no |
-| [iam\_instance\_profiles](#input\_iam\_instance\_profiles) | Map of instance profile definitions to create | `map(any)` | `{}` | no |
| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path | `string` | `"/"` | no |
| [kubeconfig\_api\_version](#input\_kubeconfig\_api\_version) | KubeConfig API version. Defaults to client.authentication.k8s.io/v1alpha1 | `string` | `"client.authentication.k8s.io/v1alpha1"` | no |
| [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"] | `list(string)` | `[]` | no |
| [kubeconfig\_aws\_authenticator\_command](#input\_kubeconfig\_aws\_authenticator\_command) | Command to use to fetch AWS EKS credentials | `string` | `"aws-iam-authenticator"` | no |
| [kubeconfig\_aws\_authenticator\_command\_args](#input\_kubeconfig\_aws\_authenticator\_command\_args) | Default arguments passed to the authenticator command. Defaults to [token -i $cluster\_name] | `list(string)` | `[]` | no |
| [kubeconfig\_aws\_authenticator\_env\_variables](#input\_kubeconfig\_aws\_authenticator\_env\_variables) | Environment variables that should be used when executing the authenticator. e.g. { AWS\_PROFILE = "eks"} | `map(string)` | `{}` | no |
-| [kubeconfig\_file\_permission](#input\_kubeconfig\_file\_permission) | File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.` | `string` | `"0600"` | no |
| [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig | `string` | `""` | no |
-| [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/` | `string` | `"./"` | no |
| [launch\_templates](#input\_launch\_templates) | Map of launch template definitions to create | `map(any)` | `{}` | no |
-| [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file | `bool` | `true` | no |
-| [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified | `bool` | `true` | no |
| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers | `bool` | `true` | no |
-| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap | `list(string)` | `[]` | no |
-| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[]` | no |
| [node\_groups](#input\_node\_groups) | Map of map of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
@@ -253,14 +237,11 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
-| [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available | `number` | `300` | no |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
-| [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups | `bool` | `false` | no |
| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id` | `bool` | `true` | no |
| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Template | `map(any)` | `{}` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
@@ -268,7 +249,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [workers\_additional\_policies](#input\_workers\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
| [workers\_egress\_cidrs](#input\_workers\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
| [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name | `string` | `""` | no |
-| [write\_kubeconfig](#input\_write\_kubeconfig) | Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path` | `bool` | `true` | no |
## Outputs
@@ -286,14 +266,10 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ID attached to the EKS cluster. On 1.14 or later, this is the 'Additional security groups' in the EKS console. |
| [cluster\_version](#output\_cluster\_version) | The Kubernetes server version for the EKS cluster. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
| [fargate\_iam\_role\_arn](#output\_fargate\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
| [fargate\_iam\_role\_name](#output\_fargate\_iam\_role\_name) | IAM role name for EKS Fargate pods |
| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| [kubeconfig](#output\_kubeconfig) | kubectl config file contents for this EKS cluster. Will block on cluster creation until the cluster is really ready. |
-| [kubeconfig\_filename](#output\_kubeconfig\_filename) | The filename of the generated kubectl config. Will block on cluster creation until the cluster is really ready. |
-| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by var.node\_groups keys |
| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
| [security\_group\_rule\_cluster\_https\_worker\_ingress](#output\_security\_group\_rule\_cluster\_https\_worker\_ingress) | Security group rule responsible for allowing pods to communicate with the EKS cluster API. |
| [worker\_iam\_instance\_profile\_arns](#output\_worker\_iam\_instance\_profile\_arns) | default IAM instance profile ARN for EKS worker groups |
diff --git a/aws_auth.tf b/aws_auth.tf
deleted file mode 100644
index 2199cb2652..0000000000
--- a/aws_auth.tf
+++ /dev/null
@@ -1,69 +0,0 @@
-locals {
- auth_worker_roles = [
- for index in range(0, var.create ? local.worker_group_count : 0) : {
- worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
- coalescelist(
- aws_iam_instance_profile.workers.*.role,
- data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
- [""]
- ),
- index,
- )}"
- platform = lookup(var.worker_groups[index], "platform", var.default_platform)
- }
- ]
-
- # Convert to format needed by aws-auth ConfigMap
- configmap_roles = [
- for role in concat(
- aws_iam_instance_profile.workers.*.role,
- module.node_groups.aws_auth_roles,
- [{ worker_role_arn = module.fargate.iam_role_arn, platform = "fargate" }],
- ) :
- {
- # Work around https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
- # Strip the leading slash off so that Terraform doesn't think it's a regex
- rolearn = replace(role["worker_role_arn"], replace(var.iam_path, "/^//", ""), "")
- username = role["platform"] == "fargate" ? "system:node:{{SessionName}}" : "system:node:{{EC2PrivateDNSName}}"
- groups = tolist(concat(
- [
- "system:bootstrappers",
- "system:nodes",
- ],
- role["platform"] == "windows" ? ["eks:kube-proxy-windows"] : [],
- role["platform"] == "fargate" ? ["system:node-proxier"] : [],
- ))
- }
- ]
-}
-
-resource "kubernetes_config_map" "aws_auth" {
- count = var.create && var.manage_aws_auth ? 1 : 0
-
- metadata {
- name = "aws-auth"
- namespace = "kube-system"
- labels = merge(
- {
- "app.kubernetes.io/managed-by" = "Terraform"
- # / are replaced by . because label validator fails in this lib
- # https://github.com/kubernetes/apimachinery/blob/1bdd76d09076d4dc0362456e59c8f551f5f24a72/pkg/util/validation/validation.go#L166
- "terraform.io/module" = "terraform-aws-modules.eks.aws"
- },
- var.aws_auth_additional_labels
- )
- }
-
- data = {
- mapRoles = yamlencode(
- distinct(concat(
- local.configmap_roles,
- var.map_roles,
- ))
- )
- mapUsers = yamlencode(var.map_users)
- mapAccounts = yamlencode(var.map_accounts)
- }
-
- depends_on = [data.http.wait_for_cluster[0]]
-}
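
Consumers who still want Terraform to manage `aws-auth` can carry a trimmed copy of the resource deleted above in their own configuration. A minimal sketch, assuming a configured kubernetes provider; `var.worker_iam_role_arn` is a hypothetical input:

```hcl
resource "kubernetes_config_map" "aws_auth" {
  metadata {
    name      = "aws-auth"
    namespace = "kube-system"
  }

  data = {
    # Map the self-managed worker role into the cluster, mirroring the
    # groups the deleted module code granted to Linux nodes.
    mapRoles = yamlencode([
      {
        rolearn  = var.worker_iam_role_arn # hypothetical input
        username = "system:node:{{EC2PrivateDNSName}}"
        groups   = ["system:bootstrappers", "system:nodes"]
      }
    ])
  }
}
```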
diff --git a/data.tf b/data.tf
index a1e87557c4..4cf387da29 100644
--- a/data.tf
+++ b/data.tf
@@ -47,17 +47,3 @@ data "aws_ami" "eks_worker_windows" {
owners = [var.worker_ami_owner_id_windows]
}
-
-data "http" "wait_for_cluster" {
- count = var.create && var.manage_aws_auth ? 1 : 0
-
- url = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
- ca_certificate = base64decode(local.cluster_auth_base64)
- timeout = var.wait_for_cluster_timeout
-
- depends_on = [
- aws_eks_cluster.this,
- aws_security_group_rule.cluster_private_access_sg_source,
- aws_security_group_rule.cluster_private_access_cidrs_source,
- ]
-}
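
The `/healthz` probe is deleted with nothing in its place, so configurations that gated kubernetes resources on it need their own check. A minimal sketch reproducing it outside the module, assuming the `terraform-aws-modules/http` provider and the output name noted in the comment:

```hcl
data "http" "wait_for_cluster" {
  # Same readiness probe the module used to run internally.
  url            = format("%s/healthz", module.eks.cluster_endpoint)
  ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) # assumed output name
  timeout        = 300
}
```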
diff --git a/docs/faq.md b/docs/faq.md
index 6fef297060..7f3dc718d6 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -31,40 +31,17 @@ When the private endpoint is enabled ensure that VPC DNS resolution and hostname
Nodes need to be able to connect to other AWS services plus pull down container images from repos. If for some reason you cannot enable public internet access for nodes you can add VPC endpoints to the relevant services: EC2 API, ECR API, ECR DKR and S3.
-### `aws-auth` ConfigMap not present
-
-The module configures the `aws-auth` ConfigMap. This is used by the cluster to grant IAM users and roles RBAC permissions in the cluster, like the IAM role assigned to the worker nodes.
-
-Confirm that the ConfigMap matches the contents of the `config_map_aws_auth` module output. You can retrieve the live config by running the following in your terraform folder:
-`kubectl --kubeconfig=kubeconfig_* -n kube-system get cm aws-auth -o yaml`
-
-If the ConfigMap is missing or the contents are incorrect then ensure that you have properly configured the kubernetes provider block by referring to [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) and run `terraform apply` again.
-
-Users with `manage_aws_auth = false` will need to apply the ConfigMap themselves.
-
## How can I work with the cluster if I disable the public endpoint?
You have to interact with the cluster from within the VPC that it's associated with, from an instance that's allowed access via the cluster's security group.
Creating a new cluster with the public endpoint disabled is harder to achieve. You will either want to pass in a pre-configured cluster security group or apply the `aws-auth` configmap in a separate action.
-## ConfigMap "aws-auth" already exists
-
-This can happen if the kubernetes provider has not been configured for use with the cluster. The kubernetes provider will be accessing your default kubernetes cluster which already has the map defined. Read [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details on how to configure the kubernetes provider correctly.
-
-Users upgrading from modules before 8.0.0 will need to import their existing aws-auth ConfigMap in to the terraform state. See 8.0.0's [CHANGELOG](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/v8.0.0/CHANGELOG.md#v800---2019-12-11) for more details.
-
-## `Error: Get http://localhost/api/v1/namespaces/kube-system/configmaps/aws-auth: dial tcp 127.0.0.1:80: connect: connection refused`
-
-Usually this means that the kubernetes provider has not been configured, there is no default `~/.kube/config` and so the kubernetes provider is attempting to talk to localhost.
-
-You need to configure the kubernetes provider correctly. See [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details.
-
## How can I stop Terraform from removing the EKS tags from my VPC and subnets?
You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
-An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore\_tags-configuration-block). However this can also cause terraform to display a perpetual difference.
+An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore_tags-configuration-block). However this can also cause terraform to display a perpetual difference.
## How do I safely remove old worker groups?
@@ -232,4 +209,4 @@ module "eks" {
}
}
}
-```
\ No newline at end of file
+```
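
For the `ignore_tags` alternative mentioned in the FAQ above, a minimal provider-level sketch with a placeholder cluster name:

```hcl
provider "aws" {
  # Stop Terraform from reverting EKS-managed tags on shared VPC resources.
  ignore_tags {
    keys = ["kubernetes.io/cluster/my-cluster"] # placeholder cluster name
  }
}
```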
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index 2588dbe182..4d8b24b8be 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -69,7 +69,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [node\_groups](#output\_node\_groups) | Outputs from node groups |
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index 94e21c1f2e..2fcab6f85f 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -25,9 +25,6 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- write_kubeconfig = false
- manage_aws_auth = true
-
worker_groups = {
one = {
name = "bottlerocket-nodes"
diff --git a/examples/bottlerocket/outputs.tf b/examples/bottlerocket/outputs.tf
index 10a3a96604..8ea0263436 100644
--- a/examples/bottlerocket/outputs.tf
+++ b/examples/bottlerocket/outputs.tf
@@ -8,17 +8,7 @@ output "cluster_security_group_id" {
value = module.eks.cluster_security_group_id
}
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
-output "node_groups" {
- description = "Outputs from node groups"
- value = module.eks.node_groups
-}
+# output "node_groups" {
+# description = "Outputs from node groups"
+# value = module.eks.node_groups
+# }
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 3463ed1e30..505251c50f 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -42,7 +42,6 @@ Note that this example may create resources which cost money. Run `terraform des
|------|--------|---------|
| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | n/a |
-| [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | n/a |
| [eks](#module\_eks) | ../.. | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
@@ -68,7 +67,5 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [node\_groups](#output\_node\_groups) | Outputs from node groups |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index bf4147158e..d5fc73e578 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -104,33 +104,6 @@ module "eks" {
}
}
- # AWS Auth (kubernetes_config_map)
- map_roles = [
- {
- rolearn = "arn:aws:iam::66666666666:role/role1"
- username = "role1"
- groups = ["system:masters"]
- },
- ]
-
- map_users = [
- {
- userarn = "arn:aws:iam::66666666666:user/user1"
- username = "user1"
- groups = ["system:masters"]
- },
- {
- userarn = "arn:aws:iam::66666666666:user/user2"
- username = "user2"
- groups = ["system:masters"]
- },
- ]
-
- map_accounts = [
- "777777777777",
- "888888888888",
- ]
-
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -154,12 +127,6 @@ module "disabled_fargate" {
create_fargate_pod_execution_role = false
}
-module "disabled_node_groups" {
- source = "../../modules/node_groups"
-
- create = false
-}
-
################################################################################
# Kubernetes provider configuration
################################################################################
diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf
index 10a3a96604..fcc55c0596 100644
--- a/examples/complete/outputs.tf
+++ b/examples/complete/outputs.tf
@@ -12,13 +12,3 @@ output "kubectl_config" {
description = "kubectl config as generated by the module."
value = module.eks.kubeconfig
}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
-output "node_groups" {
- description = "Outputs from node groups"
- value = module.eks.node_groups
-}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
index 7f2ff38d0e..9757e410f5 100644
--- a/examples/fargate/README.md
+++ b/examples/fargate/README.md
@@ -62,7 +62,5 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
index d4b21032c9..77320fcd67 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate/main.tf
@@ -97,8 +97,6 @@ module "eks" {
}
}
- manage_aws_auth = false
-
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -106,7 +104,6 @@ module "eks" {
}
}
-
##############################################
# Calling submodule with existing EKS cluster
##############################################
diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf
index b7f23eeaf2..4d4e2ecaff 100644
--- a/examples/fargate/outputs.tf
+++ b/examples/fargate/outputs.tf
@@ -8,16 +8,6 @@ output "cluster_security_group_id" {
value = module.eks.cluster_security_group_id
}
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
output "fargate_profile_arns" {
description = "Outputs from node groups"
value = module.eks.fargate_profile_arns
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
index eb5af76913..db4185fb51 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/instance_refresh/README.md
@@ -75,8 +75,6 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
| [sqs\_queue\_asg\_notification\_arn](#output\_sqs\_queue\_asg\_notification\_arn) | SQS queue ASG notification ARN |
| [sqs\_queue\_asg\_notification\_url](#output\_sqs\_queue\_asg\_notification\_url) | SQS queue ASG notification URL |
diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf
index a3dd033616..1addebfa63 100644
--- a/examples/instance_refresh/outputs.tf
+++ b/examples/instance_refresh/outputs.tf
@@ -8,16 +8,6 @@ output "cluster_security_group_id" {
value = module.eks.cluster_security_group_id
}
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
output "sqs_queue_asg_notification_arn" {
description = "SQS queue ASG notification ARN"
value = module.aws_node_termination_handler_sqs.sqs_queue_arn
diff --git a/examples/launch_templates/README.md b/examples/launch_templates/README.md
index 007ec04fcd..90650cd89e 100644
--- a/examples/launch_templates/README.md
+++ b/examples/launch_templates/README.md
@@ -60,6 +60,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf
index b778ec7926..256cb0b74e 100644
--- a/examples/launch_templates/outputs.tf
+++ b/examples/launch_templates/outputs.tf
@@ -7,13 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
diff --git a/examples/launch_templates_with_managed_node_groups/README.md b/examples/launch_templates_with_managed_node_groups/README.md
index 5475700b82..b877b58a9d 100644
--- a/examples/launch_templates_with_managed_node_groups/README.md
+++ b/examples/launch_templates_with_managed_node_groups/README.md
@@ -65,6 +65,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
diff --git a/examples/launch_templates_with_managed_node_groups/outputs.tf b/examples/launch_templates_with_managed_node_groups/outputs.tf
index 359db3a481..440cd0f723 100644
--- a/examples/launch_templates_with_managed_node_groups/outputs.tf
+++ b/examples/launch_templates_with_managed_node_groups/outputs.tf
@@ -7,13 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
diff --git a/examples/managed_node_groups/README.md b/examples/managed_node_groups/README.md
index 9c9732191e..fda54c8aa0 100644
--- a/examples/managed_node_groups/README.md
+++ b/examples/managed_node_groups/README.md
@@ -55,11 +55,7 @@ Note that this example may create resources which cost money. Run `terraform des
## Inputs
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `["777777777777", "888888888888"]` | no |
-| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | `list(object({ rolearn = string, username = string, groups = list(string) }))` | `[{ rolearn = "arn:aws:iam::66666666666:role/role1", username = "role1", groups = ["system:masters"] }]` | no |
+No inputs.
## Outputs
@@ -67,7 +63,4 @@ Note that this example may create resources which cost money. Run `terraform des
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
-| [node\_groups](#output\_node\_groups) | Outputs from node groups |
diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf
index 0eb4176696..3129207a44 100644
--- a/examples/managed_node_groups/main.tf
+++ b/examples/managed_node_groups/main.tf
@@ -76,10 +76,6 @@ module "eks" {
}
}
- map_roles = var.map_roles
- map_users = var.map_users
- map_accounts = var.map_accounts
-
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
diff --git a/examples/managed_node_groups/outputs.tf b/examples/managed_node_groups/outputs.tf
index 10a3a96604..8ea0263436 100644
--- a/examples/managed_node_groups/outputs.tf
+++ b/examples/managed_node_groups/outputs.tf
@@ -8,17 +8,7 @@ output "cluster_security_group_id" {
value = module.eks.cluster_security_group_id
}
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
-
-output "node_groups" {
- description = "Outputs from node groups"
- value = module.eks.node_groups
-}
+# output "node_groups" {
+# description = "Outputs from node groups"
+# value = module.eks.node_groups
+# }
diff --git a/examples/managed_node_groups/variables.tf b/examples/managed_node_groups/variables.tf
index 57853d8b4d..e69de29bb2 100644
--- a/examples/managed_node_groups/variables.tf
+++ b/examples/managed_node_groups/variables.tf
@@ -1,48 +0,0 @@
-variable "map_accounts" {
- description = "Additional AWS account numbers to add to the aws-auth configmap."
- type = list(string)
-
- default = [
- "777777777777",
- "888888888888",
- ]
-}
-
-variable "map_roles" {
- description = "Additional IAM roles to add to the aws-auth configmap."
- type = list(object({
- rolearn = string
- username = string
- groups = list(string)
- }))
-
- default = [
- {
- rolearn = "arn:aws:iam::66666666666:role/role1"
- username = "role1"
- groups = ["system:masters"]
- },
- ]
-}
-
-variable "map_users" {
- description = "Additional IAM users to add to the aws-auth configmap."
- type = list(object({
- userarn = string
- username = string
- groups = list(string)
- }))
-
- default = [
- {
- userarn = "arn:aws:iam::66666666666:user/user1"
- username = "user1"
- groups = ["system:masters"]
- },
- {
- userarn = "arn:aws:iam::66666666666:user/user2"
- username = "user2"
- groups = ["system:masters"]
- },
- ]
-}
diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md
index 6223e8570a..567250d548 100644
--- a/examples/secrets_encryption/README.md
+++ b/examples/secrets_encryption/README.md
@@ -61,6 +61,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf
index 359db3a481..440cd0f723 100644
--- a/examples/secrets_encryption/outputs.tf
+++ b/examples/secrets_encryption/outputs.tf
@@ -7,13 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = module.eks.config_map_aws_auth
-}
diff --git a/locals.tf b/locals.tf
index a2ffeb25e7..ee8be22388 100644
--- a/locals.tf
+++ b/locals.tf
@@ -26,7 +26,7 @@ locals {
cluster_auth_base64 = local.cluster_auth_base64
aws_authenticator_kubeconfig_apiversion = var.kubeconfig_api_version
aws_authenticator_command = var.kubeconfig_aws_authenticator_command
- aws_authenticator_command_args = coalescelist(var.kubeconfig_aws_authenticator_command_args, ["token", "-i", local.cluster_name])
+ aws_authenticator_command_args = coalescelist(var.kubeconfig_aws_authenticator_command_args, ["token", "-i", var.cluster_name])
aws_authenticator_additional_args = var.kubeconfig_aws_authenticator_additional_args
aws_authenticator_env_variables = var.kubeconfig_aws_authenticator_env_variables
}) : ""
@@ -41,7 +41,7 @@ locals {
),
merge({
platform = lookup(group, "platform", var.default_platform)
- cluster_name = local.cluster_name
+ cluster_name = var.cluster_name
endpoint = local.cluster_endpoint
cluster_auth_base64 = local.cluster_auth_base64
pre_userdata = lookup(group, "pre_userdata", "")
diff --git a/main.tf b/main.tf
index 4d0076786e..5863e2f0c4 100644
--- a/main.tf
+++ b/main.tf
@@ -1,6 +1,7 @@
################################################################################
# Cluster
################################################################################
+
resource "aws_eks_cluster" "this" {
count = var.create ? 1 : 0
@@ -46,9 +47,7 @@ resource "aws_eks_cluster" "this" {
depends_on = [
aws_security_group_rule.cluster_egress_internet,
aws_security_group_rule.cluster_https_worker_ingress,
- aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
- aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
- aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy,
+ aws_iam_role_policy_attachment.cluster_additional,
aws_cloudwatch_log_group.this
]
}
@@ -138,19 +137,6 @@ resource "aws_security_group_rule" "cluster_private_access_sg_source" {
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
}
-################################################################################
-# Kubeconfig
-################################################################################
-
-resource "local_file" "kubeconfig" {
- count = var.create && var.write_kubeconfig ? 1 : 0
-
- content = local.kubeconfig
- filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
- file_permission = var.kubeconfig_file_permission
- directory_permission = "0755"
-}
-
################################################################################
# IRSA
################################################################################
@@ -158,7 +144,7 @@ resource "local_file" "kubeconfig" {
data "tls_certificate" "this" {
count = var.create && var.enable_irsa ? 1 : 0
- url = aws_eks_cluster.this.identity[0].oidc[0].issuer
+ url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}
resource "aws_iam_openid_connect_provider" "oidc_provider" {
@@ -166,7 +152,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
thumbprint_list = [data.tls_certificate.this[0].certificates[0].sha1_fingerprint]
- url = aws_eks_cluster.this.identity[0].oidc[0].issuer
+ url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
tags = merge(
{
@@ -251,7 +237,7 @@ resource "aws_iam_policy" "cluster_additional" {
description = "Additional permissions for EKS cluster"
policy = data.aws_iam_policy_document.cluster_additional[0].json
- tags = merge(var.tags, var.cluster_role_iam_tags)
+ tags = merge(var.tags, var.cluster_iam_role_tags)
}
resource "aws_iam_role_policy_attachment" "cluster_additional" {
diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md
deleted file mode 100644
index 3abbbddd57..0000000000
--- a/modules/node_groups/README.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# EKS `node_groups` submodule
-
-Helper submodule to create and manage resources related to `eks_node_groups`.
-
-## Node Groups' IAM Role
-
-The role ARN specified in `var.default_iam_role_arn` will be used by default. In a simple configuration this will be the worker role created by the parent module.
-
-`iam_role_arn` must be specified in either `var.node_groups_defaults` or `var.node_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent.
-
-## `node_groups` and `node_groups_defaults` keys
-`node_groups_defaults` is a map that can take the below keys. Values will be used if not specified in individual node groups.
-
-`node_groups` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_node_group` name. Inner map can take the below values.
-
-| Name | Description | Type | If unset |
-|------|-------------|:----:|:-----:|
-| additional\_tags | Additional tags to apply to node group | map(string) | Only `var.tags` applied |
-| ami\_release\_version | AMI version of workers | string | Provider default behavior |
-| ami\_type | AMI Type. See Terraform or AWS docs | string | Provider default behavior |
-| ami\_id | ID of custom AMI. If you use a custom AMI, you need to set `ami_is_eks_optimized` | string | Provider default behavior |
-| ami\_is\_eks\_optimized | If the custom AMI is an EKS optimised image, ignored if `ami_id` is not set. If this is `true` then `bootstrap.sh` is called automatically (max pod logic needs to be manually set), if this is `false` you need to provide all the node configuration in `pre_userdata` | bool | `true` |
-| capacity\_type | Type of instance capacity to provision. Options are `ON_DEMAND` and `SPOT` | string | Provider default behavior |
-| create_launch_template | Create and use a default launch template | bool | `false` |
-| desired\_capacity | Desired number of workers | number | `var.workers_group_defaults[asg_desired_capacity]` |
-| disk\_encrypted | Whether the root disk will be encrypted. Requires `create_launch_template` to be `true` and `disk_kms_key_id` to be set | bool | false |
-| disk\_kms\_key\_id | KMS Key used to encrypt the root disk. Requires both `create_launch_template` and `disk_encrypted` to be `true` | string | "" |
-| disk\_size | Workers' disk size | number | Provider default behavior |
-| disk\_type | Workers' disk type. Require `create_launch_template` to be `true`| string | Provider default behavior |
-| disk\_throughput | Workers' disk throughput. Require `create_launch_template` to be `true` and `disk_type` to be `gp3`| number | Provider default behavior |
-| disk\_iops | Workers' disk IOPS. Require `create_launch_template` to be `true` and `disk_type` to be `gp3`| number | Provider default behavior |
-| ebs\_optimized | Enables/disables EBS optimization. Require `create_launch_template` to be `true` | bool | `true` if defined `instance\_types` are not present in `var.ebs\_optimized\_not\_supported` |
-| enable_monitoring | Enables/disables detailed monitoring. Require `create_launch_template` to be `true`| bool | `true` |
-| eni_delete | Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying) | bool | `true` |
-| force\_update\_version | Force version update if existing pods are unable to be drained due to a pod disruption budget issue. | bool | Provider default behavior |
-| iam\_role\_arn | IAM role ARN for workers | string | `var.default_iam_role_arn` |
-| instance\_types | Node group's instance type(s). Multiple types can be specified when `capacity_type="SPOT"`. | list | `[var.workers_group_defaults[instance_type]]` |
-| k8s\_labels | Kubernetes labels | map(string) | No labels applied |
-| key\_name | Key name for workers. Set to empty string to disable remote access | string | `var.workers_group_defaults[key_name]` |
-| bootstrap_env | Provide environment variables to customise [bootstrap.sh](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh). Require `create_launch_template` to be `true` | map(string) | `{}` |
-| kubelet_extra_args | Extra arguments for kubelet, this is automatically merged with `labels`. Require `create_launch_template` to be `true` | string | "" |
-| launch_template_id | The id of a aws_launch_template to use | string | No LT used |
-| launch\_template_version | The version of the LT to use | string | none |
-| max\_capacity | Max number of workers | number | `var.workers_group_defaults[asg_max_size]` |
-| min\_capacity | Min number of workers | number | `var.workers_group_defaults[asg_min_size]` |
-| update_config.max\_unavailable\_percentage | Max percentage of unavailable nodes during update. (e.g. 25, 50, etc) | number | `null` if `update_config.max_unavailable` is set |
-| update_config.max\_unavailable | Max number of unavailable nodes during update | number | `null` if `update_config.max_unavailable_percentage` is set |
-| name | Name of the node group. If you don't strictly need a fixed name, we recommend using `name_prefix` instead. | string | Will use the auto-generated name prefix |
-| name_prefix | Name prefix of the node group | string | Auto generated |
-| pre_userdata | Userdata to prepend to the default userdata. Requires `create_launch_template` to be `true` | string | "" |
-| public_ip | Associate a public IP address with a worker. Requires `create_launch_template` to be `true` | bool | `false` |
-| source\_security\_group\_ids | Source security groups for remote access to workers | list(string) | If key\_name is specified: THE REMOTE ACCESS WILL BE OPENED TO THE WORLD |
-| subnets | Subnets to contain workers | list(string) | `var.workers_group_defaults[subnets]` |
-| version | Kubernetes version | string | Provider default behavior |
-| taints | Kubernetes node taints | list(map) | empty |
-| timeouts | A map of timeouts for create/update/delete operations. | `map(string)` | Provider default behavior |
-| update_default_version | Whether or not to set the new launch template version as the default | bool | `true` |
-| metadata_http_endpoint | The state of the instance metadata service. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_endpoint]` |
-| metadata_http_tokens | If session tokens are required. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_tokens]` |
-| metadata_http_put_response_hop_limit | The desired HTTP PUT response hop limit for instance metadata requests. Requires `create_launch_template` to be `true` | number | `var.workers_group_defaults[metadata_http_put_response_hop_limit]` |
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of parent cluster | `string` | `""` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of parent cluster | `string` | `""` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
-| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
-| [node\_default\_settings](#input\_node\_default\_settings) | Node group defaults from parent | `any` | `{}` | no |
-| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
-| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
-
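Tying together the node group keys documented in the (removed) table above, a minimal `node_groups` input might look like the sketch below. All names and values are illustrative, not taken from the module:

```hcl
node_groups = {
  default = {
    desired_capacity = 2
    min_capacity     = 1
    max_capacity     = 5
    instance_types   = ["t3.large"]
    k8s_labels       = { workload = "general" }
  }
  spot = {
    capacity_type          = "SPOT"
    instance_types         = ["t3.large", "t3a.large"]
    create_launch_template = true # required for disk_type/disk_size below
    disk_type              = "gp3"
    disk_size              = 50
  }
}
```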
diff --git a/modules/node_groups/launch_template.tf b/modules/node_groups/launch_template.tf
deleted file mode 100644
index 3972f8f005..0000000000
--- a/modules/node_groups/launch_template.tf
+++ /dev/null
@@ -1,146 +0,0 @@
-data "cloudinit_config" "workers_userdata" {
- for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
-
- gzip = false
- base64_encode = true
- boundary = "//"
-
- part {
- content_type = "text/x-shellscript"
- content = templatefile("${path.module}/templates/userdata.sh.tpl",
- {
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- ami_id = lookup(each.value, "ami_id", "")
- ami_is_eks_optimized = each.value["ami_is_eks_optimized"]
- bootstrap_env = each.value["bootstrap_env"]
- kubelet_extra_args = each.value["kubelet_extra_args"]
- pre_userdata = each.value["pre_userdata"]
- capacity_type = lookup(each.value, "capacity_type", "ON_DEMAND")
- append_labels = length(lookup(each.value, "k8s_labels", {})) > 0 ? ",${join(",", [for k, v in lookup(each.value, "k8s_labels", {}) : "${k}=${v}"])}" : ""
- }
- )
- }
-}
-
-# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
-# there are several more options one could set but you probably don't need to modify them
-# you can take the default and add your custom AMI and/or custom tags
-#
-# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy for the node group. If you DON'T use a custom AMI,
-# then the default user-data for bootstrapping a cluster is merged into the copy.
-resource "aws_launch_template" "workers" {
- for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
-
- name_prefix = local.node_groups_names[each.key]
- description = format("EKS Managed Node Group custom LT for %s", local.node_groups_names[each.key])
- update_default_version = lookup(each.value, "update_default_version", true)
-
- block_device_mappings {
- device_name = "/dev/xvda"
-
- ebs {
- volume_size = lookup(each.value, "disk_size", null)
- volume_type = lookup(each.value, "disk_type", null)
- iops = lookup(each.value, "disk_iops", null)
- throughput = lookup(each.value, "disk_throughput", null)
- encrypted = lookup(each.value, "disk_encrypted", null)
- kms_key_id = lookup(each.value, "disk_kms_key_id", null)
- delete_on_termination = true
- }
- }
-
- ebs_optimized = lookup(each.value, "ebs_optimized", null)
-
- instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
-
- monitoring {
- enabled = lookup(each.value, "enable_monitoring", null)
- }
-
- network_interfaces {
- associate_public_ip_address = lookup(each.value, "public_ip", null)
- delete_on_termination = lookup(each.value, "eni_delete", null)
- security_groups = compact(flatten([
- var.worker_security_group_id,
- var.worker_additional_security_group_ids,
- lookup(
- each.value,
- "additional_security_group_ids",
- null,
- ),
- ]))
- }
-
- # if you want to use a custom AMI
- image_id = lookup(each.value, "ami_id", null)
-
- # If you use a custom AMI, you need to supply the bootstrap script via user-data, since EKS DOESN'T merge its managed user-data in that case.
- # You can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
- #
- # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
-
- user_data = data.cloudinit_config.workers_userdata[each.key].rendered
-
- key_name = lookup(each.value, "key_name", null)
-
- metadata_options {
- http_endpoint = lookup(each.value, "metadata_http_endpoint", null)
- http_tokens = lookup(each.value, "metadata_http_tokens", null)
- http_put_response_hop_limit = lookup(each.value, "metadata_http_put_response_hop_limit", null)
- }
-
- # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "instance"
-
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
- }
-
- # Supplying custom tags to EKS instances' root volumes is another use-case for LaunchTemplates. (doesn't add tags to volumes dynamically provisioned via PVC, though)
- tag_specifications {
- resource_type = "volume"
-
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
- }
-
- # Supplying custom tags to EKS instances ENI's is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "network-interface"
-
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
- }
-
- # Tag the LT itself
- tags = merge(
- var.tags,
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {}),
- )
-
- lifecycle {
- create_before_destroy = true
- }
-}
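As the three `tag_specifications` blocks above show, `additional_tags` from both `node_groups_defaults` and the individual group are merged over `var.tags` and propagated to instances, root volumes, and ENIs. A hypothetical input exercising that merge order:

```hcl
tags = { "Environment" = "dev" } # applied everywhere

node_groups_defaults = {
  additional_tags = { "team" = "platform" } # applied to every group
}

node_groups = {
  default = {
    additional_tags = { "cost-center" = "1234" } # merged last, wins on key collisions
  }
}
```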
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
deleted file mode 100644
index 5b9a017421..0000000000
--- a/modules/node_groups/locals.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-locals {
- # Merge defaults and per-group values to make code cleaner
- node_groups_expanded = { for k, v in var.node_groups : k => merge(
- {
- desired_capacity = var.node_default_settings["asg_desired_capacity"]
- iam_role_arn = var.default_iam_role_arn
- instance_types = [var.node_default_settings["instance_type"]]
- key_name = var.node_default_settings["key_name"]
- launch_template_id = var.node_default_settings["launch_template_id"]
- launch_template_version = var.node_default_settings["launch_template_version"]
- set_instance_types_on_lt = false
- max_capacity = var.node_default_settings["asg_max_size"]
- min_capacity = var.node_default_settings["asg_min_size"]
- subnet_ids = var.node_default_settings["subnet_ids"]
- create_launch_template = false
- bootstrap_env = {}
- kubelet_extra_args = var.node_default_settings["kubelet_extra_args"]
- disk_size = var.node_default_settings["root_volume_size"]
- disk_type = var.node_default_settings["root_volume_type"]
- disk_iops = var.node_default_settings["root_iops"]
- disk_throughput = var.node_default_settings["root_volume_throughput"]
- disk_encrypted = var.node_default_settings["root_encrypted"]
- disk_kms_key_id = var.node_default_settings["root_kms_key_id"]
- enable_monitoring = var.node_default_settings["enable_monitoring"]
- eni_delete = var.node_default_settings["eni_delete"]
- public_ip = var.node_default_settings["public_ip"]
- pre_userdata = var.node_default_settings["pre_userdata"]
- additional_security_group_ids = var.node_default_settings["additional_security_group_ids"]
- taints = []
- timeouts = var.node_default_settings["timeouts"]
- update_default_version = true
- ebs_optimized = null
- metadata_http_endpoint = var.node_default_settings["metadata_http_endpoint"]
- metadata_http_tokens = var.node_default_settings["metadata_http_tokens"]
- metadata_http_put_response_hop_limit = var.node_default_settings["metadata_http_put_response_hop_limit"]
- ami_is_eks_optimized = true
- },
- var.node_groups_defaults,
- v,
- ) if var.create }
-
- node_groups_names = { for k, v in local.node_groups_expanded : k => lookup(
- v,
- "name",
- lookup(
- v,
- "name_prefix",
- join("-", [var.cluster_name, k])
- )
- ) }
-}
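The three-way `merge()` in `node_groups_expanded` gives per-group values the highest precedence, then `var.node_groups_defaults`, then the hard-coded defaults. A toy illustration with made-up values:

```hcl
locals {
  module_defaults = { disk_size = 20, disk_type = "gp2", enable_monitoring = true }
  user_defaults   = { disk_type = "gp3" } # stands in for var.node_groups_defaults
  group           = { disk_size = 100 }   # stands in for one entry of var.node_groups

  # Later arguments to merge() override earlier ones
  merged = merge(local.module_defaults, local.user_defaults, local.group)
  # => { disk_size = 100, disk_type = "gp3", enable_monitoring = true }
}
```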
diff --git a/modules/node_groups/main.tf b/modules/node_groups/main.tf
deleted file mode 100644
index 8d68756c06..0000000000
--- a/modules/node_groups/main.tf
+++ /dev/null
@@ -1,105 +0,0 @@
-resource "aws_eks_node_group" "workers" {
- for_each = local.node_groups_expanded
-
- node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
- node_group_name = lookup(each.value, "name", null)
-
- cluster_name = var.cluster_name
- node_role_arn = each.value["iam_role_arn"]
- subnet_ids = each.value["subnet_ids"]
-
- scaling_config {
- desired_size = each.value["desired_capacity"]
- max_size = each.value["max_capacity"]
- min_size = each.value["min_capacity"]
- }
-
- ami_type = lookup(each.value, "ami_type", null)
- disk_size = each.value["launch_template_id"] != null || each.value["create_launch_template"] ? null : lookup(each.value, "disk_size", null)
- instance_types = !each.value["set_instance_types_on_lt"] ? each.value["instance_types"] : null
- release_version = lookup(each.value, "ami_release_version", null)
- capacity_type = lookup(each.value, "capacity_type", null)
- force_update_version = lookup(each.value, "force_update_version", null)
-
- dynamic "remote_access" {
- for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
- ec2_ssh_key = each.value["key_name"]
- source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
- }] : []
-
- content {
- ec2_ssh_key = remote_access.value["ec2_ssh_key"]
- source_security_group_ids = remote_access.value["source_security_group_ids"]
- }
- }
-
- dynamic "launch_template" {
- for_each = each.value["launch_template_id"] != null ? [{
- id = each.value["launch_template_id"]
- version = each.value["launch_template_version"]
- }] : []
-
- content {
- id = launch_template.value["id"]
- version = launch_template.value["version"]
- }
- }
-
- dynamic "launch_template" {
- for_each = each.value["launch_template_id"] == null && each.value["create_launch_template"] ? [{
- id = aws_launch_template.workers[each.key].id
- version = each.value["launch_template_version"] == "$Latest" ? aws_launch_template.workers[each.key].latest_version : (
- each.value["launch_template_version"] == "$Default" ? aws_launch_template.workers[each.key].default_version : each.value["launch_template_version"]
- )
- }] : []
-
- content {
- id = launch_template.value["id"]
- version = launch_template.value["version"]
- }
- }
-
- dynamic "taint" {
- for_each = each.value["taints"]
-
- content {
- key = taint.value["key"]
- value = taint.value["value"]
- effect = taint.value["effect"]
- }
- }
-
- dynamic "update_config" {
- for_each = try(each.value.update_config.max_unavailable_percentage > 0, each.value.update_config.max_unavailable > 0, false) ? [true] : []
-
- content {
- max_unavailable_percentage = try(each.value.update_config.max_unavailable_percentage, null)
- max_unavailable = try(each.value.update_config.max_unavailable, null)
- }
- }
-
- timeouts {
- create = lookup(each.value["timeouts"], "create", null)
- update = lookup(each.value["timeouts"], "update", null)
- delete = lookup(each.value["timeouts"], "delete", null)
- }
-
- version = lookup(each.value, "version", null)
-
- labels = merge(
- lookup(var.node_groups_defaults, "k8s_labels", {}),
- lookup(var.node_groups[each.key], "k8s_labels", {})
- )
-
- tags = merge(
- var.tags,
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {}),
- )
-
- lifecycle {
- create_before_destroy = true
- ignore_changes = [scaling_config[0].desired_size]
- }
-
-}
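To follow the second `dynamic "launch_template"` block above: the sentinel strings `"$Latest"` and `"$Default"` are translated into the module-managed template's numeric versions. A hypothetical group exercising that path, together with `update_config`:

```hcl
node_groups = {
  pinned = {
    create_launch_template  = true
    launch_template_version = "$Latest" # resolved to aws_launch_template.workers["pinned"].latest_version

    update_config = {
      max_unavailable_percentage = 25 # at most 25% of nodes unavailable during an update
    }
  }
}
```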
diff --git a/modules/node_groups/outputs.tf b/modules/node_groups/outputs.tf
deleted file mode 100644
index ad148ea514..0000000000
--- a/modules/node_groups/outputs.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-output "node_groups" {
- description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
- value = aws_eks_node_group.workers
-}
-
-output "aws_auth_roles" {
- description = "Roles for use in aws-auth ConfigMap"
- value = [
- for k, v in local.node_groups_expanded : {
- worker_role_arn = lookup(v, "iam_role_arn", var.default_iam_role_arn)
- platform = "linux"
- }
- ]
-}
diff --git a/modules/node_groups/templates/userdata.sh.tpl b/modules/node_groups/templates/userdata.sh.tpl
deleted file mode 100644
index 321c17b427..0000000000
--- a/modules/node_groups/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash -e
-%{ if length(ami_id) == 0 ~}
-
-# Set bootstrap env
-printf '#!/bin/bash
-%{ for k, v in bootstrap_env ~}
-export ${k}="${v}"
-%{ endfor ~}
-export ADDITIONAL_KUBELET_EXTRA_ARGS="${kubelet_extra_args}"
-' > /etc/profile.d/eks-bootstrap-env.sh
-
-# Source extra environment variables in bootstrap script
-sed -i '/^set -o errexit/a\\nsource /etc/profile.d/eks-bootstrap-env.sh' /etc/eks/bootstrap.sh
-
-# Merge ADDITIONAL_KUBELET_EXTRA_ARGS into KUBELET_EXTRA_ARGS
-sed -i 's/^KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-}/KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-} $${ADDITIONAL_KUBELET_EXTRA_ARGS}/' /etc/eks/bootstrap.sh
-%{else ~}
-
-# Set variables for custom AMI
-API_SERVER_URL=${cluster_endpoint}
-B64_CLUSTER_CA=${cluster_auth_base64}
-%{ for k, v in bootstrap_env ~}
-${k}="${v}"
-%{ endfor ~}
-KUBELET_EXTRA_ARGS='--node-labels=eks.amazonaws.com/nodegroup-image=${ami_id},eks.amazonaws.com/capacityType=${capacity_type}${append_labels} ${kubelet_extra_args}'
-%{endif ~}
-
-# User supplied pre userdata
-${pre_userdata}
-%{ if length(ami_id) > 0 && ami_is_eks_optimized ~}
-
-# Call bootstrap for EKS optimised custom AMI
-/etc/eks/bootstrap.sh ${cluster_name} --apiserver-endpoint "$${API_SERVER_URL}" --b64-cluster-ca "$${B64_CLUSTER_CA}" --kubelet-extra-args "$${KUBELET_EXTRA_ARGS}"
-%{ endif ~}
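One way to preview what this template renders for a given group is a throwaway output that calls `templatefile()` directly. Every value below is a stand-in; an empty `ami_id` selects the EKS-managed AMI branch of the template:

```hcl
output "userdata_preview" {
  value = templatefile("${path.module}/templates/userdata.sh.tpl", {
    cluster_name         = "demo"
    cluster_endpoint     = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com"
    cluster_auth_base64  = "dGVzdA=="
    ami_id               = "" # empty => EKS-managed AMI branch
    ami_is_eks_optimized = true
    bootstrap_env        = { CONTAINER_RUNTIME = "containerd" }
    kubelet_extra_args   = "--max-pods=110"
    pre_userdata         = ""
    capacity_type        = "ON_DEMAND"
    append_labels        = ""
  })
}
```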
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
deleted file mode 100644
index 93412ef6f0..0000000000
--- a/modules/node_groups/variables.tf
+++ /dev/null
@@ -1,65 +0,0 @@
-variable "create" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
- type = bool
- default = true
-}
-
-variable "cluster_name" {
- description = "Name of parent cluster"
- type = string
- default = ""
-}
-
-variable "cluster_endpoint" {
- description = "Endpoint of parent cluster"
- type = string
- default = ""
-}
-
-variable "cluster_auth_base64" {
- description = "Base64 encoded CA of parent cluster"
- type = string
- default = ""
-}
-
-variable "default_iam_role_arn" {
- description = "ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults`"
- type = string
- default = ""
-}
-
-variable "node_default_settings" {
- description = "Node group defaults from parent"
- type = any
- default = {}
-}
-
-variable "worker_security_group_id" {
- description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
- type = string
- default = ""
-}
-
-variable "worker_additional_security_group_ids" {
- description = "A list of additional security group ids to attach to worker instances"
- type = list(string)
- default = []
-}
-
-variable "tags" {
- description = "A map of tags to add to all resources"
- type = map(string)
- default = {}
-}
-
-variable "node_groups_defaults" {
- description = "map of maps of node groups to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
- type = any
- default = {}
-}
-
-variable "node_groups" {
- description = "Map of maps of `eks_node_groups` to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
- type = any
- default = {}
-}
diff --git a/modules/node_groups/versions.tf b/modules/node_groups/versions.tf
deleted file mode 100644
index 5324b482ab..0000000000
--- a/modules/node_groups/versions.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = ">= 3.56.0"
- cloudinit = ">= 2.0"
- }
-}
diff --git a/outputs.tf b/outputs.tf
index 754e7c5f97..e87515ca8b 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,10 +1,6 @@
output "cluster_id" {
description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready."
value = local.cluster_id
-
- # So that calling plans wait for the cluster to be available before attempting to use it.
- # There is no need to duplicate this datasource
- depends_on = [data.http.wait_for_cluster]
}
output "cluster_arn" {
@@ -32,24 +28,19 @@ output "cluster_security_group_id" {
value = local.cluster_security_group_id
}
-output "config_map_aws_auth" {
- description = "A kubernetes configuration to authenticate to this EKS cluster."
- value = kubernetes_config_map.aws_auth.*
-}
-
output "cluster_iam_role_name" {
description = "IAM role name of the EKS cluster."
- value = local.cluster_iam_role_name
+ value = try(aws_iam_role.cluster[0].name, "")
}
output "cluster_iam_role_arn" {
description = "IAM role ARN of the EKS cluster."
- value = local.cluster_iam_role_arn
+ value = try(aws_iam_role.cluster[0].arn, "")
}
output "cluster_oidc_issuer_url" {
description = "The URL on the EKS cluster OIDC Issuer"
- value = local.cluster_oidc_issuer_url
+ value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "")
}
output "cluster_primary_security_group_id" {
@@ -67,24 +58,6 @@ output "cloudwatch_log_group_arn" {
value = element(concat(aws_cloudwatch_log_group.this[*].arn, [""]), 0)
}
-output "kubeconfig" {
- description = "kubectl config file contents for this EKS cluster. Will block on cluster creation until the cluster is really ready."
- value = local.kubeconfig
-
- # So that calling plans wait for the cluster to be available before attempting to use it.
- # There is no need to duplicate this datasource
- depends_on = [data.http.wait_for_cluster]
-}
-
-output "kubeconfig_filename" {
- description = "The filename of the generated kubectl config. Will block on cluster creation until the cluster is really ready."
- value = concat(local_file.kubeconfig.*.filename, [""])[0]
-
- # So that calling plans wait for the cluster to be available before attempting to use it.
- # There is no need to duplicate this datasource
- depends_on = [data.http.wait_for_cluster]
-}
-
output "oidc_provider_arn" {
description = "The ARN of the OIDC Provider if `enable_irsa = true`."
value = var.enable_irsa ? concat(aws_iam_openid_connect_provider.oidc_provider[*].arn, [""])[0] : null
@@ -147,20 +120,12 @@ output "worker_iam_instance_profile_names" {
output "worker_iam_role_name" {
description = "default IAM role name for EKS worker groups"
- value = coalescelist(
- aws_iam_role.workers.*.name,
- data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
- [""]
- )[0]
+ value = try(aws_iam_role.workers[0].name, "")
}
output "worker_iam_role_arn" {
description = "default IAM role ARN for EKS worker groups"
- value = coalescelist(
- aws_iam_role.workers.*.arn,
- data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_arn,
- [""]
- )[0]
+ value = try(aws_iam_role.workers[0].arn, "")
}
output "fargate_profile_ids" {
@@ -183,11 +148,6 @@ output "fargate_iam_role_arn" {
value = module.fargate.iam_role_arn
}
-output "node_groups" {
- description = "Outputs from EKS node groups. Map of maps, keyed by var.node_groups keys"
- value = module.node_groups.node_groups
-}
-
output "security_group_rule_cluster_https_worker_ingress" {
description = "Security group rule responsible for allowing pods to communicate with the EKS cluster API."
value = aws_security_group_rule.cluster_https_worker_ingress
diff --git a/variables.tf b/variables.tf
index f9ecb45180..7151f2a824 100644
--- a/variables.tf
+++ b/variables.tf
@@ -142,28 +142,6 @@ variable "cluster_security_group_tags" {
default = {}
}
-################################################################################
-# Kubeconfig
-################################################################################
-
-variable "write_kubeconfig" {
- description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`"
- type = bool
- default = true
-}
-
-variable "kubeconfig_output_path" {
- description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`"
- type = string
- default = "./"
-}
-
-variable "kubeconfig_file_permission" {
- description = "File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.`"
- type = string
- default = "0600"
-}
-
################################################################################
# IRSA
################################################################################
@@ -227,7 +205,7 @@ variable "cluster_iam_role_tags" {
variable "create_fargate" {
description = "Determines whether Fargate resources are created"
type = bool
- default = true
+ default = false
}
variable "create_fargate_pod_execution_role" {
@@ -282,75 +260,18 @@ variable "fargate_tags" {
-
-
-
-
-### ^ADDED^
-
-
-
variable "default_platform" {
description = "Default platform name. Valid options are `linux` and `windows`"
type = string
default = "linux"
}
-variable "manage_aws_auth" {
- description = "Whether to apply the aws-auth configmap file"
- type = bool
- default = true
-}
-
-variable "aws_auth_additional_labels" {
- description = "Additional kubernetes labels applied on aws-auth ConfigMap"
- default = {}
- type = map(string)
-}
-
-variable "map_accounts" {
- description = "Additional AWS account numbers to add to the aws-auth configmap"
- type = list(string)
- default = []
-}
-
-variable "map_roles" {
- description = "Additional IAM roles to add to the aws-auth configmap"
- type = list(object({
- rolearn = string
- username = string
- groups = list(string)
- }))
- default = []
-}
-
-variable "map_users" {
- description = "Additional IAM users to add to the aws-auth configmap"
- type = list(object({
- userarn = string
- username = string
- groups = list(string)
- }))
- default = []
-}
-
variable "launch_templates" {
description = "Map of launch template definitions to create"
type = map(any)
default = {}
}
-variable "iam_instance_profiles" {
- description = "Map of instance profile definitions to create"
- type = map(any)
- default = {}
-}
-
-
-
-
-
-
variable "worker_groups" {
description = "A map of maps defining worker group configurations to be defined using AWS Launch Template"
type = map(any)
@@ -393,11 +314,11 @@ variable "worker_ami_owner_id_windows" {
default = "amazon"
}
-variable "worker_additional_security_group_ids" {
- description = "A list of additional security group ids to attach to worker instances"
- type = list(string)
- default = []
-}
+# variable "worker_additional_security_group_ids" {
+# description = "A list of additional security group ids to attach to worker instances"
+# type = list(string)
+# default = []
+# }
variable "worker_sg_ingress_from_port" {
description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)"
@@ -454,12 +375,6 @@ variable "worker_create_security_group" {
default = true
}
-variable "worker_create_initial_lifecycle_hooks" {
- description = "Whether to create initial lifecycle hooks provided in worker groups"
- type = bool
- default = false
-}
-
variable "worker_create_cluster_primary_security_group_rules" {
description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group"
type = bool
@@ -498,13 +413,6 @@ variable "cluster_endpoint_private_access_sg" {
-variable "manage_cluster_iam_resources" {
- description = "Whether to let the module manage cluster IAM resources. If set to false, `cluster_iam_role_arn` must be specified"
- type = bool
- default = true
-}
-
-
variable "manage_worker_iam_resources" {
description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers"
type = bool
@@ -535,9 +443,6 @@ variable "node_groups" {
default = {}
}
-
-
-
variable "cluster_egress_cidrs" {
description = "List of CIDR blocks that are permitted for cluster egress traffic"
type = list(string)
@@ -549,11 +454,3 @@ variable "workers_egress_cidrs" {
type = list(string)
default = ["0.0.0.0/0"]
}
-
-variable "wait_for_cluster_timeout" {
- description = "A timeout (in seconds) to wait for cluster to be available"
- type = number
- default = 300
-}
-
-
diff --git a/workers.tf b/workers.tf
index 6a2f69e9f2..142bf0ac08 100644
--- a/workers.tf
+++ b/workers.tf
@@ -1,13 +1,3 @@
-locals {
- # Abstracted to a local so that it can be shared with node group as well
- # Only values that are common between ASG and Node Group are pulled out to this local map
- group_default_settings = {
- min_size = try(var.group_default_settings.min_size, 1)
- max_size = try(var.group_default_settings.max_size, 3)
- desired_capacity = try(var.group_default_settings.desired_capacity, 1)
- }
-}
-
################################################################################
# Fargate
################################################################################
@@ -17,7 +7,7 @@ module "fargate" {
create = var.create_fargate
create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
- fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
+ fargate_pod_execution_role_arn = var.fargate_pod_execution_role_arn
cluster_name = aws_eks_cluster.this[0].name
subnet_ids = coalescelist(var.fargate_subnet_ids, var.subnet_ids, [""])
@@ -31,195 +21,16 @@ module "fargate" {
}
################################################################################
-# Node Groups
-################################################################################
-
-module "node_groups" {
- source = "./modules/node_groups"
-
- create = var.create
-
- cluster_name = aws_eks_cluster.this[0].name
- cluster_endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
-
- default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
- worker_security_group_id = local.worker_security_group_id
- worker_additional_security_group_ids = var.worker_additional_security_group_ids
-
- node_default_settings = var.group_default_settings
- node_groups = var.node_groups
-
- tags = var.tags
-
- depends_on = [
- aws_eks_cluster.this,
- aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
- aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
- aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly
- ]
-}
-
-################################################################################
-# Autoscaling Group
+# Worker group defaults
################################################################################
-resource "aws_autoscaling_group" "this" {
- for_each = var.create ? var.worker_groups : {}
-
- name_prefix = "${join("-", [aws_eks_cluster.this[0].name, try(each.value.name, each.key)])}-"
-
- launch_template {
- name = each.value.launch_template_key # required
- version = try(each.value.launch_template_version, var.group_default_settings.min_size, "$Latest")
- }
-
- availability_zones = try(each.value.availability_zones, var.group_default_settings.availability_zones, null)
- vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default_settings.vpc_zone_identifier, null)
-
- min_size = try(each.value.min_size, local.group_default_settings.min_size)
- max_size = try(each.value.max_size, local.group_default_settings.max_size)
- desired_capacity = try(each.value.desired_capacity, local.group_default_settings.desired_capacity)
- capacity_rebalance = try(each.value.capacity_rebalance, var.group_default_settings.capacity_rebalance, null)
- default_cooldown = try(each.value.default_cooldown, var.group_default_settings.default_cooldown, null)
- protect_from_scale_in = try(each.value.protect_from_scale_in, var.group_default_settings.protect_from_scale_in, null)
-
- load_balancers = try(each.value.load_balancers, var.group_default_settings.load_balancers, null)
- target_group_arns = try(each.value.target_group_arns, var.group_default_settings.target_group_arns, null)
- placement_group = try(each.value.placement_group, var.group_default_settings.placement_group, null)
- health_check_type = try(each.value.health_check_type, var.group_default_settings.health_check_type, null)
- health_check_grace_period = try(each.value.health_check_grace_period, var.group_default_settings.health_check_grace_period, null)
-
- force_delete = try(each.value.force_delete, var.group_default_settings.force_delete, false)
- termination_policies = try(each.value.termination_policies, var.group_default_settings.termination_policies, null)
- suspended_processes = try(each.value.suspended_processes, var.group_default_settings.suspended_processes, "AZRebalance")
- max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default_settings.max_instance_lifetime, null)
-
- enabled_metrics = try(each.value.enabled_metrics, var.group_default_settings.enabled_metrics, null)
- metrics_granularity = try(each.value.metrics_granularity, var.group_default_settings.metrics_granularity, null)
- service_linked_role_arn = try(each.value.service_linked_role_arn, var.group_default_settings.service_linked_role_arn, null)
-
- dynamic "initial_lifecycle_hook" {
- for_each = try(each.value.initial_lifecycle_hook, var.group_default_settings.initial_lifecycle_hook, {})
- iterator = hook
-
- content {
- name = hook.value.name
- default_result = lookup(hook.value, "default_result", null)
- heartbeat_timeout = lookup(hook.value, "heartbeat_timeout", null)
- lifecycle_transition = hook.value.lifecycle_transition
- notification_metadata = lookup(hook.value, "notification_metadata", null)
- notification_target_arn = lookup(hook.value, "notification_target_arn", null)
- role_arn = lookup(hook.value, "role_arn", null)
- }
- }
-
- dynamic "instance_refresh" {
- for_each = try(each.value.instance_refresh, var.group_default_settings.instance_refresh, {})
- iterator = refresh
-
- content {
- strategy = refresh.value.strategy
- triggers = lookup(refresh.value, "triggers", null)
-
- dynamic "preferences" {
- for_each = try(refresh.value.preferences, [])
- content {
- instance_warmup = lookup(preferences.value, "instance_warmup", null)
- min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
- }
- }
- }
- }
-
- dynamic "mixed_instances_policy" {
- for_each = try(each.value.mixed_instances_policy, var.group_default_settings.mixed_instances_policy, {})
- iterator = mixed
-
- content {
- dynamic "instances_distribution" {
- for_each = try(mixed.value.instances_distribution, {})
- iterator = distro
-
- content {
- on_demand_allocation_strategy = lookup(distro.value, "on_demand_allocation_strategy", null)
- on_demand_base_capacity = lookup(distro.value, "on_demand_base_capacity", null)
- on_demand_percentage_above_base_capacity = lookup(distro.value, "on_demand_percentage_above_base_capacity", null)
- spot_allocation_strategy = lookup(distro.value, "spot_allocation_strategy", null)
- spot_instance_pools = lookup(distro.value, "spot_instance_pools", null)
- spot_max_price = lookup(distro.value, "spot_max_price", null)
- }
- }
-
- launch_template {
- launch_template_specification {
- launch_template_name = local.launch_template
- version = local.launch_template_version
- }
-
- dynamic "override" {
- for_each = try(mixed.value.override, {})
- content {
- instance_type = lookup(override.value, "instance_type", null)
- weighted_capacity = lookup(override.value, "weighted_capacity", null)
-
- dynamic "launch_template_specification" {
- for_each = try(override.value.launch_template_specification, {})
- content {
- launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null)
- }
- }
- }
- }
- }
- }
- }
-
- dynamic "warm_pool" {
- for_each = try(each.value.warm_pool, var.group_default_settings.warm_pool, {})
-
- content {
- pool_state = lookup(warm_pool.value, "pool_state", null)
- min_size = lookup(warm_pool.value, "min_size", null)
- max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
- }
- }
-
- dynamic "tag" {
- for_each = concat(
- [
- {
- "key" = "Name"
- "value" = "${join("-", [aws_eks_cluster.this[0].name, lookup(each.value, "name", each.key)])}-eks-asg"
- "propagate_at_launch" = true
- },
- {
- "key" = "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}"
- "value" = "owned"
- "propagate_at_launch" = true
- },
- ],
- [
- for tag_key, tag_value in var.tags :
- tomap({
- key = tag_key
- value = tag_value
- propagate_at_launch = true
- })
- if tag_key != "Name" && !contains([for tag in lookup(each.value, "tags", []) : tag["key"]], tag_key)
- ],
- lookup(each.value, "tags", {})
- )
- content {
- key = tag.value.key
- value = tag.value.value
- propagate_at_launch = tag.value.propagate_at_launch
- }
- }
-
- lifecycle {
- create_before_destroy = true
- ignore_changes = [desired_capacity]
+locals {
+ # Abstracted to a local so that it can be shared with node group as well
+ # Only values that are common between ASG and Node Group are pulled out to this local map
+ group_default_settings = {
+ min_size = try(var.group_default_settings.min_size, 1)
+ max_size = try(var.group_default_settings.max_size, 3)
+ desired_capacity = try(var.group_default_settings.desired_capacity, 1)
}
}
@@ -451,9 +262,6 @@ resource "aws_launch_template" "this" {
aws_security_group_rule.workers_ingress_cluster_https,
aws_security_group_rule.workers_ingress_cluster_primary,
aws_security_group_rule.cluster_primary_ingress_workers,
- aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
- aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
- aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly,
aws_iam_role_policy_attachment.workers_additional_policies
]
@@ -464,6 +272,263 @@ resource "aws_launch_template" "this" {
tags = merge(var.tags, lookup(each.value, "tags", {}))
}
+################################################################################
+# Node Groups
+################################################################################
+
+
+# resource "aws_eks_node_group" "workers" {
+# for_each = var.create ? var.node_groups : {}
+
+# node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
+# node_group_name = lookup(each.value, "name", null)
+
+# cluster_name = var.cluster_name
+# node_role_arn = try(each.value.iam_role_arn, var.default_iam_role_arn)
+# subnet_ids = each.value["subnet_ids"]
+
+# scaling_config {
+# desired_size = each.value["desired_capacity"]
+# max_size = each.value["max_capacity"]
+# min_size = each.value["min_capacity"]
+# }
+
+# ami_type = lookup(each.value, "ami_type", null)
+# disk_size = lookup(each.value, "disk_size", null)
+# instance_types = lookup(each.value, "instance_types", null)
+# release_version = lookup(each.value, "ami_release_version", null)
+# capacity_type = lookup(each.value, "capacity_type", null)
+# force_update_version = lookup(each.value, "force_update_version", null)
+
+# dynamic "remote_access" {
+# for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
+# ec2_ssh_key = each.value["key_name"]
+# source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
+# }] : []
+
+# content {
+# ec2_ssh_key = remote_access.value["ec2_ssh_key"]
+# source_security_group_ids = remote_access.value["source_security_group_ids"]
+# }
+# }
+
+# dynamic "launch_template" {
+# for_each = [try(each.value.launch_template, {})]
+
+# content {
+# id = lookup(launch_template.value, "id", null)
+# name = lookup(launch_template.value, "name", null)
+# version = launch_template.value.version
+# }
+# }
+
+# dynamic "taint" {
+# for_each = each.value["taints"]
+
+# content {
+# key = taint.value["key"]
+# value = taint.value["value"]
+# effect = taint.value["effect"]
+# }
+# }
+
+# dynamic "update_config" {
+# for_each = try(each.value.update_config.max_unavailable_percentage > 0, each.value.update_config.max_unavailable > 0, false) ? [true] : []
+
+# content {
+# max_unavailable_percentage = try(each.value.update_config.max_unavailable_percentage, null)
+# max_unavailable = try(each.value.update_config.max_unavailable, null)
+# }
+# }
+
+# timeouts {
+# create = lookup(each.value["timeouts"], "create", null)
+# update = lookup(each.value["timeouts"], "update", null)
+# delete = lookup(each.value["timeouts"], "delete", null)
+# }
+
+# version = lookup(each.value, "version", null)
+
+# labels = merge(
+# lookup(var.node_groups_defaults, "k8s_labels", {}),
+# lookup(each.value, "k8s_labels", {})
+# )
+
+# tags = merge(
+# var.tags,
+# lookup(var.node_groups_defaults, "additional_tags", {}),
+# lookup(each.value, "additional_tags", {}),
+# )
+
+# lifecycle {
+# create_before_destroy = true
+# ignore_changes = [scaling_config[0].desired_size]
+# }
+# }
+
+################################################################################
+# Autoscaling Group
+################################################################################
+
+resource "aws_autoscaling_group" "this" {
+ for_each = var.create ? var.worker_groups : {}
+
+ name_prefix = "${join("-", [aws_eks_cluster.this[0].name, try(each.value.name, each.key)])}-"
+
+ launch_template {
+ name = each.value.launch_template_key # required
+ version = try(each.value.launch_template_version, var.group_default_settings.launch_template_version, "$Latest")
+ }
+
+ availability_zones = try(each.value.availability_zones, var.group_default_settings.availability_zones, null)
+ vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default_settings.vpc_zone_identifier, null)
+
+ min_size = try(each.value.min_size, local.group_default_settings.min_size)
+ max_size = try(each.value.max_size, local.group_default_settings.max_size)
+ desired_capacity = try(each.value.desired_capacity, local.group_default_settings.desired_capacity)
+ capacity_rebalance = try(each.value.capacity_rebalance, var.group_default_settings.capacity_rebalance, null)
+ default_cooldown = try(each.value.default_cooldown, var.group_default_settings.default_cooldown, null)
+ protect_from_scale_in = try(each.value.protect_from_scale_in, var.group_default_settings.protect_from_scale_in, null)
+
+ load_balancers = try(each.value.load_balancers, var.group_default_settings.load_balancers, null)
+ target_group_arns = try(each.value.target_group_arns, var.group_default_settings.target_group_arns, null)
+ placement_group = try(each.value.placement_group, var.group_default_settings.placement_group, null)
+ health_check_type = try(each.value.health_check_type, var.group_default_settings.health_check_type, null)
+ health_check_grace_period = try(each.value.health_check_grace_period, var.group_default_settings.health_check_grace_period, null)
+
+ force_delete = try(each.value.force_delete, var.group_default_settings.force_delete, false)
+ termination_policies = try(each.value.termination_policies, var.group_default_settings.termination_policies, null)
+ suspended_processes = try(each.value.suspended_processes, var.group_default_settings.suspended_processes, ["AZRebalance"])
+ max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default_settings.max_instance_lifetime, null)
+
+ enabled_metrics = try(each.value.enabled_metrics, var.group_default_settings.enabled_metrics, null)
+ metrics_granularity = try(each.value.metrics_granularity, var.group_default_settings.metrics_granularity, null)
+ service_linked_role_arn = try(each.value.service_linked_role_arn, var.group_default_settings.service_linked_role_arn, null)
+
+ dynamic "initial_lifecycle_hook" {
+ for_each = try(each.value.initial_lifecycle_hook, var.group_default_settings.initial_lifecycle_hook, {})
+ iterator = hook
+
+ content {
+ name = hook.value.name
+ default_result = lookup(hook.value, "default_result", null)
+ heartbeat_timeout = lookup(hook.value, "heartbeat_timeout", null)
+ lifecycle_transition = hook.value.lifecycle_transition
+ notification_metadata = lookup(hook.value, "notification_metadata", null)
+ notification_target_arn = lookup(hook.value, "notification_target_arn", null)
+ role_arn = lookup(hook.value, "role_arn", null)
+ }
+ }
+
+ dynamic "instance_refresh" {
+ for_each = try(each.value.instance_refresh, var.group_default_settings.instance_refresh, {})
+ iterator = refresh
+
+ content {
+ strategy = refresh.value.strategy
+ triggers = lookup(refresh.value, "triggers", null)
+
+ dynamic "preferences" {
+ for_each = try(refresh.value.preferences, [])
+ content {
+ instance_warmup = lookup(preferences.value, "instance_warmup", null)
+ min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
+ }
+ }
+ }
+ }
+
+ dynamic "mixed_instances_policy" {
+ for_each = try(each.value.mixed_instances_policy, var.group_default_settings.mixed_instances_policy, {})
+ iterator = mixed
+
+ content {
+ dynamic "instances_distribution" {
+ for_each = try(mixed.value.instances_distribution, {})
+ iterator = distro
+
+ content {
+ on_demand_allocation_strategy = lookup(distro.value, "on_demand_allocation_strategy", null)
+ on_demand_base_capacity = lookup(distro.value, "on_demand_base_capacity", null)
+ on_demand_percentage_above_base_capacity = lookup(distro.value, "on_demand_percentage_above_base_capacity", null)
+ spot_allocation_strategy = lookup(distro.value, "spot_allocation_strategy", null)
+ spot_instance_pools = lookup(distro.value, "spot_instance_pools", null)
+ spot_max_price = lookup(distro.value, "spot_max_price", null)
+ }
+ }
+
+ launch_template {
+ launch_template_specification {
+ launch_template_name = local.launch_template
+ version = local.launch_template_version
+ }
+
+ dynamic "override" {
+ for_each = try(mixed.value.override, {})
+ content {
+ instance_type = lookup(override.value, "instance_type", null)
+ weighted_capacity = lookup(override.value, "weighted_capacity", null)
+
+ dynamic "launch_template_specification" {
+ for_each = try(override.value.launch_template_specification, {})
+ content {
+ launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ dynamic "warm_pool" {
+ for_each = try(each.value.warm_pool, var.group_default_settings.warm_pool, {})
+
+ content {
+ pool_state = lookup(warm_pool.value, "pool_state", null)
+ min_size = lookup(warm_pool.value, "min_size", null)
+ max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
+ }
+ }
+
+ dynamic "tag" {
+ for_each = concat(
+ [
+ {
+ "key" = "Name"
+ "value" = "${join("-", [aws_eks_cluster.this[0].name, lookup(each.value, "name", each.key)])}-eks-asg"
+ "propagate_at_launch" = true
+ },
+ {
+ "key" = "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}"
+ "value" = "owned"
+ "propagate_at_launch" = true
+ },
+ ],
+ [
+ for tag_key, tag_value in var.tags :
+ tomap({
+ key = tag_key
+ value = tag_value
+ propagate_at_launch = true
+ })
+ if tag_key != "Name" && !contains([for tag in lookup(each.value, "tags", []) : tag["key"]], tag_key)
+ ],
+ lookup(each.value, "tags", {})
+ )
+ content {
+ key = tag.value.key
+ value = tag.value.value
+ propagate_at_launch = tag.value.propagate_at_launch
+ }
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ ignore_changes = [desired_capacity]
+ }
+}
+
################################################################################
# IAM Role & Instance Profile
################################################################################
@@ -527,8 +592,12 @@ resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
# Security Group
################################################################################
+locals {
+ create_worker_sg = var.create && var.worker_create_security_group
+}
+
resource "aws_security_group" "workers" {
- count = var.worker_create_security_group && var.create ? 1 : 0
+ count = local.create_worker_sg ? 1 : 0
name_prefix = var.cluster_name
description = "Security group for all nodes in the cluster."
@@ -543,7 +612,7 @@ resource "aws_security_group" "workers" {
}
resource "aws_security_group_rule" "workers_egress_internet" {
- count = var.worker_create_security_group && var.create ? 1 : 0
+ count = local.create_worker_sg ? 1 : 0
description = "Allow nodes all egress to the Internet."
protocol = "-1"
@@ -555,7 +624,7 @@ resource "aws_security_group_rule" "workers_egress_internet" {
}
resource "aws_security_group_rule" "workers_ingress_self" {
- count = var.worker_create_security_group && var.create ? 1 : 0
+ count = local.create_worker_sg ? 1 : 0
description = "Allow node to communicate with each other."
protocol = "-1"
@@ -567,7 +636,7 @@ resource "aws_security_group_rule" "workers_ingress_self" {
}
resource "aws_security_group_rule" "workers_ingress_cluster" {
- count = var.worker_create_security_group && var.create ? 1 : 0
+ count = local.create_worker_sg ? 1 : 0
description = "Allow workers pods to receive communication from the cluster control plane."
protocol = "tcp"
@@ -579,7 +648,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
}
resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
- count = var.worker_create_security_group && var.create ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
+ count = local.create_worker_sg ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
description = "Allow workers Kubelets to receive communication from the cluster control plane."
protocol = "tcp"
@@ -591,7 +660,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
}
resource "aws_security_group_rule" "workers_ingress_cluster_https" {
- count = var.worker_create_security_group && var.create ? 1 : 0
+ count = local.create_worker_sg ? 1 : 0
description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
protocol = "tcp"
@@ -603,23 +672,23 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" {
}
resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
- count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create ? 1 : 0
+ count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
description = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
protocol = "all"
security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_primary_security_group_id
+ source_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
from_port = 0
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
- count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create ? 1 : 0
+ count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
description = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
protocol = "all"
- security_group_id = local.cluster_primary_security_group_id
+ security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
source_security_group_id = local.worker_security_group_id
from_port = 0
to_port = 65535
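Putting the reworked `aws_autoscaling_group.this` above to use, a hypothetical `worker_groups` entry might look like the sketch below. `launch_template_key` is required, and is assumed here to name an entry created via `var.launch_templates`; subnet IDs and tags are illustrative:

```hcl
worker_groups = {
  default = {
    launch_template_key     = "default" # assumed to reference a template from var.launch_templates
    launch_template_version = "$Latest"
    min_size                = 1
    max_size                = 4
    desired_capacity        = 2
    vpc_zone_identifier     = ["subnet-aaa111", "subnet-bbb222"]
    tags = [
      { key = "Environment", value = "dev", propagate_at_launch = true },
    ]
  }
}
```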
From f22a71aa3a999bf889743e93dc626a119ead730d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 9 Nov 2021 13:23:04 -0500
Subject: [PATCH 10/83] chore: stashing from testing and validation, moving to
 sub-modules even if duplicated code on launch template
---
README.md | 51 +++----
data.tf | 37 +----
examples/launch_templates/main.tf | 127 +++++++++--------
locals.tf | 49 +++----
main.tf | 35 ++---
outputs.tf | 53 ++++---
variables.tf | 48 ++++++-
versions.tf | 12 --
workers.tf | 226 +++++++++++++++---------------
9 files changed, 316 insertions(+), 322 deletions(-)
diff --git a/README.md b/README.md
index 2371dd99da..6ecfe87110 100644
--- a/README.md
+++ b/README.md
@@ -118,9 +118,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.56.0 |
| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
-| [http](#requirement\_http) | >= 2.4.1 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4.0 |
| [tls](#requirement\_tls) | >= 2.2.0 |
## Providers
@@ -143,36 +140,30 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_iam_instance_profile.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_iam_instance_profile.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_additional_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_https_worker_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_primary_ingress_workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_primary_ingress_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_kubelet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_primary](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_ingress_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_ingress_cluster_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_ingress_cluster_kubelet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_ingress_cluster_primary](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_ami.eks_worker_windows](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.workers_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.worker_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
@@ -212,6 +203,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `false` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
+| [create\_worker\_iam\_role](#input\_create\_worker\_iam\_role) | Determines whether a worker IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows` | `string` | `"linux"` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_iam\_role\_path](#input\_fargate\_iam\_role\_path) | Fargate IAM role path | `string` | `null` | no |
@@ -237,18 +229,23 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
+| [worker\_additional\_policies](#input\_worker\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id` | `bool` | `true` | no |
-| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Template | `map(any)` | `{}` | no |
+| [worker\_egress\_cidrs](#input\_worker\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
+| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Template | `any` | `{}` | no |
+| [worker\_iam\_role\_name](#input\_worker\_iam\_role\_name) | Name to use on worker role created | `string` | `null` | no |
+| [worker\_iam\_role\_path](#input\_worker\_iam\_role\_path) | Worker IAM role path | `string` | `null` | no |
+| [worker\_iam\_role\_permissions\_boundary](#input\_worker\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the worker role | `string` | `null` | no |
+| [worker\_iam\_role\_tags](#input\_worker\_iam\_role\_tags) | A map of additional tags to add to the worker IAM role created | `map(string)` | `{}` | no |
+| [worker\_iam\_role\_use\_name\_prefix](#input\_worker\_iam\_role\_use\_name\_prefix) | Determines whether worker IAM role name (`worker_iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| [worker\_role\_name](#input\_worker\_role\_name) | User defined workers role name | `string` | `""` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443) | `number` | `1025` | no |
-| [workers\_additional\_policies](#input\_workers\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
-| [workers\_egress\_cidrs](#input\_workers\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
-| [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name | `string` | `""` | no |
## Outputs
@@ -277,10 +274,4 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [worker\_iam\_role\_arn](#output\_worker\_iam\_role\_arn) | default IAM role ARN for EKS worker groups |
| [worker\_iam\_role\_name](#output\_worker\_iam\_role\_name) | default IAM role name for EKS worker groups |
| [worker\_security\_group\_id](#output\_worker\_security\_group\_id) | Security group ID attached to the EKS workers. |
-| [workers\_asg\_arns](#output\_workers\_asg\_arns) | IDs of the autoscaling groups containing workers. |
-| [workers\_asg\_names](#output\_workers\_asg\_names) | Names of the autoscaling groups containing workers. |
-| [workers\_launch\_template\_arns](#output\_workers\_launch\_template\_arns) | ARNs of the worker launch templates. |
-| [workers\_launch\_template\_ids](#output\_workers\_launch\_template\_ids) | IDs of the worker launch templates. |
-| [workers\_launch\_template\_latest\_versions](#output\_workers\_launch\_template\_latest\_versions) | Latest versions of the worker launch templates. |
-| [workers\_user\_data](#output\_workers\_user\_data) | User data of worker groups |
diff --git a/data.tf b/data.tf
index 4cf387da29..7995e85b90 100644
--- a/data.tf
+++ b/data.tf
@@ -2,48 +2,15 @@ data "aws_partition" "current" {}
data "aws_caller_identity" "current" {}
-data "aws_iam_policy_document" "workers_assume_role_policy" {
- statement {
- sid = "EKSWorkerAssumeRole"
-
- actions = [
- "sts:AssumeRole",
- ]
-
- principals {
- type = "Service"
- identifiers = [local.ec2_principal]
- }
- }
-}
-
data "aws_ami" "eks_worker" {
- count = contains(local.worker_groups_platforms, "linux") ? 1 : 0
+ count = var.create ? 1 : 0
filter {
name = "name"
- values = [local.worker_ami_name_filter]
+ values = [coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*")]
}
most_recent = true
owners = [var.worker_ami_owner_id]
}
-
-data "aws_ami" "eks_worker_windows" {
- count = contains(local.worker_groups_platforms, "windows") ? 1 : 0
-
- filter {
- name = "name"
- values = [local.worker_ami_name_filter_windows]
- }
-
- filter {
- name = "platform"
- values = ["windows"]
- }
-
- most_recent = true
-
- owners = [var.worker_ami_owner_id_windows]
-}
diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
index d9c02cb328..4a1412341f 100644
--- a/examples/launch_templates/main.tf
+++ b/examples/launch_templates/main.tf
@@ -6,6 +6,12 @@ locals {
name = "launch_template-${random_string.suffix.result}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -13,64 +19,77 @@ locals {
################################################################################
module "eks" {
- source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
+ source = "../.."
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- worker_groups = {
- one = {
- name = "worker-group-1"
- instance_type = "t3.small"
- asg_desired_capacity = 2
- public_ip = true
- tags = [{
- key = "ExtraTag"
- value = "TagValue"
- propagate_at_launch = true
- }]
+ launch_templates = {
+ lt_default = {}
+ lt_two = {
+ instance_type = "t3.small"
}
- two = {
- name = "worker-group-2"
- instance_type = "t3.medium"
- asg_desired_capacity = 1
- public_ip = true
- ebs_optimized = true
- }
- three = {
- name = "worker-group-3"
- instance_type = "t2.large"
- asg_desired_capacity = 1
- public_ip = true
- elastic_inference_accelerator = "eia2.medium"
- }
- four = {
- name = "worker-group-4"
- instance_type = "t3.small"
- asg_desired_capacity = 1
- public_ip = true
- root_volume_size = 150
- root_volume_type = "gp3"
- root_volume_throughput = 300
- additional_ebs_volumes = [
- {
- block_device_name = "/dev/xvdb"
- volume_size = 100
- volume_type = "gp3"
- throughput = 150
- },
- ]
- },
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
+ group_default_settings = {
+ launch_template_key = "tl_default"
+ instance_type = "t3.medium"
+ }
+
+ worker_groups = {
+ # one = {
+ # name = "worker-group-1"
+ # asg_desired_capacity = 2
+ # public_ip = true
+ # tags = {
+ # ExtraTag = "TagValue"
+ # }
+  #   propagated_tags = [{
+  #     key                 = "ExtraPropagatedTag"
+  #     value               = "PropagatedTagValue"
+  #     propagate_at_launch = false
+  #   }]
+ # }
+ # two = {
+ # name = "worker-group-2"
+ # launch_template_key = "lt_two"
+ # asg_desired_capacity = 1
+ # public_ip = true
+ # ebs_optimized = true
+ # }
+ # three = {
+ # name = "worker-group-3"
+ # instance_type = "t2.large"
+ # asg_desired_capacity = 1
+ # public_ip = true
+ # elastic_inference_accelerator = "eia2.medium"
+ # }
+ # four = {
+ # name = "worker-group-4"
+ # instance_type = "t3.small"
+ # asg_desired_capacity = 1
+ # public_ip = true
+ # root_volume_size = 150
+ # root_volume_type = "gp3"
+ # root_volume_throughput = 300
+ # additional_ebs_volumes = [
+ # {
+ # block_device_name = "/dev/xvdb"
+ # volume_size = 100
+ # volume_type = "gp3"
+ # throughput = 150
+ # },
+ # ]
+ # },
}
+
+ tags = local.tags
}
################################################################################
@@ -126,9 +145,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/locals.tf b/locals.tf
index ee8be22388..d0fcac425e 100644
--- a/locals.tf
+++ b/locals.tf
@@ -10,12 +10,9 @@ locals {
cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
# Worker groups
- worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
+ worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.worker.*.id) : var.worker_security_group_id
worker_groups_platforms = [for x in var.worker_groups : try(x.platform, var.default_platform)]
- worker_ami_name_filter = coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*")
- worker_ami_name_filter_windows = coalesce(var.worker_ami_name_filter_windows, "Windows_Server-2019-English-Core-EKS_Optimized-${coalesce(var.cluster_version, "cluster_version")}-*")
-
ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
@@ -31,26 +28,26 @@ locals {
aws_authenticator_env_variables = var.kubeconfig_aws_authenticator_env_variables
}) : ""
- launch_template_userdata_rendered = [
- for key, group in(var.create ? var.worker_groups : {}) : templatefile(
- try(
- group.userdata_template_file,
- lookup(group, "platform", var.default_platform) == "windows"
- ? "${path.module}/templates/userdata_windows.tpl"
- : "${path.module}/templates/userdata.sh.tpl"
- ),
- merge({
- platform = lookup(group, "platform", var.default_platform)
- cluster_name = var.cluster_name
- endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
- pre_userdata = lookup(group, "pre_userdata", "")
- additional_userdata = lookup(group, "additional_userdata", "")
- bootstrap_extra_args = lookup(group, "bootstrap_extra_args", "")
- kubelet_extra_args = lookup(group, "kubelet_extra_args", "")
- },
- lookup(group, "userdata_template_extra_args", "")
- )
- )
- ]
+ # launch_template_userdata_rendered = var.create ? [
+ # for key, group in var.worker_groups : templatefile(
+ # try(
+ # group.userdata_template_file,
+ # lookup(group, "platform", var.default_platform) == "windows"
+ # ? "${path.module}/templates/userdata_windows.tpl"
+ # : "${path.module}/templates/userdata.sh.tpl"
+ # ),
+ # merge({
+ # platform = lookup(group, "platform", var.default_platform)
+ # cluster_name = var.cluster_name
+ # endpoint = local.cluster_endpoint
+ # cluster_auth_base64 = local.cluster_auth_base64
+ # pre_userdata = lookup(group, "pre_userdata", "")
+ # additional_userdata = lookup(group, "additional_userdata", "")
+ # bootstrap_extra_args = lookup(group, "bootstrap_extra_args", "")
+ # kubelet_extra_args = lookup(group, "kubelet_extra_args", "")
+ # },
+ # lookup(group, "userdata_template_extra_args", "")
+ # )
+ # )
+ # ] : []
}
diff --git a/main.tf b/main.tf
index 5863e2f0c4..26278d929f 100644
--- a/main.tf
+++ b/main.tf
@@ -47,7 +47,6 @@ resource "aws_eks_cluster" "this" {
depends_on = [
aws_security_group_rule.cluster_egress_internet,
aws_security_group_rule.cluster_https_worker_ingress,
- aws_iam_role_policy_attachment.cluster_additional,
aws_cloudwatch_log_group.this
]
}
@@ -67,28 +66,30 @@ resource "aws_cloudwatch_log_group" "this" {
################################################################################
locals {
- cluster_security_group_name = try(var.cluster_security_group_name, var.cluster_name)
+ cluster_sg_name = coalesce(var.cluster_security_group_name, var.cluster_name)
+ create_cluster_sg = var.create && var.create_cluster_security_group
+ enable_cluster_private_endpoint_sg_access = local.create_cluster_sg && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access
}
resource "aws_security_group" "cluster" {
- count = var.create && var.create_cluster_security_group ? 1 : 0
+ count = local.create_cluster_sg ? 1 : 0
- name = var.cluster_security_group_use_name_prefix ? null : local.cluster_security_group_name
- name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_security_group_name}-" : null
+ name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
+ name_prefix = var.cluster_security_group_use_name_prefix ? try("${local.cluster_sg_name}-", local.cluster_sg_name) : null
description = "EKS cluster security group"
vpc_id = var.vpc_id
tags = merge(
var.tags,
{
- "Name" = local.cluster_security_group_name
+ "Name" = local.cluster_sg_name
},
var.cluster_security_group_tags
)
}
resource "aws_security_group_rule" "cluster_egress_internet" {
- count = var.create && var.create_cluster_security_group ? 1 : 0
+ count = local.create_cluster_sg ? 1 : 0
description = "Allow cluster egress access to the Internet"
protocol = "-1"
@@ -100,7 +101,7 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
- count = var.create && var.create_cluster_security_group && var.worker_create_security_group ? 1 : 0
+ count = local.create_cluster_sg && var.worker_create_security_group ? 1 : 0
description = "Allow pods to communicate with the EKS cluster API"
protocol = "tcp"
@@ -112,7 +113,7 @@ resource "aws_security_group_rule" "cluster_https_worker_ingress" {
}
resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
- for_each = var.create && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
+ for_each = local.enable_cluster_private_endpoint_sg_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
description = "Allow private K8S API ingress from custom CIDR source"
type = "ingress"
@@ -125,7 +126,7 @@ resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
}
resource "aws_security_group_rule" "cluster_private_access_sg_source" {
- count = var.create && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
+ count = local.enable_cluster_private_endpoint_sg_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
description = "Allow private K8S API ingress from custom Security Groups source"
type = "ingress"
@@ -167,14 +168,14 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
################################################################################
locals {
- cluster_iam_role_name = try(var.cluster_iam_role_name, var.cluster_name)
+ cluster_iam_role_name = coalesce(var.cluster_iam_role_name, var.cluster_name)
}
resource "aws_iam_role" "cluster" {
count = var.create && var.create_cluster_iam_role ? 1 : 0
name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
- name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
+ name_prefix = var.cluster_iam_role_use_name_prefix ? try("${local.cluster_iam_role_name}-", local.cluster_iam_role_name) : null
path = var.cluster_iam_role_path
assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy[0].json
@@ -183,6 +184,7 @@ resource "aws_iam_role" "cluster" {
"${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
"${local.policy_arn_prefix}/AmazonEKSServicePolicy",
"${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
+ aws_iam_policy.cluster_additional[0].arn,
]
force_detach_policies = true
@@ -233,16 +235,9 @@ resource "aws_iam_policy" "cluster_additional" {
count = var.create && var.create_cluster_iam_role ? 1 : 0
name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
- name_prefix = var.cluster_iam_role_use_name_prefix ? "${local.cluster_iam_role_name}-" : null
+ name_prefix = var.cluster_iam_role_use_name_prefix ? try("${local.cluster_iam_role_name}-", local.cluster_iam_role_name) : null
description = "Additional permissions for EKS cluster"
policy = data.aws_iam_policy_document.cluster_additional[0].json
tags = merge(var.tags, var.cluster_iam_role_tags)
}
-
-resource "aws_iam_role_policy_attachment" "cluster_additional" {
- count = var.create && var.create_cluster_iam_role ? 1 : 0
-
- role = aws_iam_role.cluster[0].name
- policy_arn = aws_iam_policy.cluster_additional[0].arn
-}
diff --git a/outputs.tf b/outputs.tf
index e87515ca8b..68e0974837 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -63,20 +63,15 @@ output "oidc_provider_arn" {
value = var.enable_irsa ? concat(aws_iam_openid_connect_provider.oidc_provider[*].arn, [""])[0] : null
}
-output "workers_asg_arns" {
- description = "IDs of the autoscaling groups containing workers."
- value = aws_autoscaling_group.this.*.arn
-}
-
-output "workers_asg_names" {
- description = "Names of the autoscaling groups containing workers."
- value = aws_autoscaling_group.this.*.id
-}
+# output "workers_asg_arns" {
+# description = "IDs of the autoscaling groups containing workers."
+# value = aws_autoscaling_group.this.*.arn
+# }
-output "workers_user_data" {
- description = "User data of worker groups"
- value = local.launch_template_userdata_rendered
-}
+# output "workers_asg_names" {
+# description = "Names of the autoscaling groups containing workers."
+# value = aws_autoscaling_group.this.*.id
+# }
# output "workers_default_ami_id" {
# description = "ID of the default worker group AMI"
@@ -88,20 +83,20 @@ output "workers_user_data" {
# value = local.default_ami_id_windows
# }
-output "workers_launch_template_ids" {
- description = "IDs of the worker launch templates."
- value = aws_launch_template.this.*.id
-}
+# output "workers_launch_template_ids" {
+# description = "IDs of the worker launch templates."
+# value = aws_launch_template.this.*.id
+# }
-output "workers_launch_template_arns" {
- description = "ARNs of the worker launch templates."
- value = aws_launch_template.this.*.arn
-}
+# output "workers_launch_template_arns" {
+# description = "ARNs of the worker launch templates."
+# value = aws_launch_template.this.*.arn
+# }
-output "workers_launch_template_latest_versions" {
- description = "Latest versions of the worker launch templates."
- value = aws_launch_template.this.*.latest_version
-}
+# output "workers_launch_template_latest_versions" {
+# description = "Latest versions of the worker launch templates."
+# value = aws_launch_template.this.*.latest_version
+# }
output "worker_security_group_id" {
description = "Security group ID attached to the EKS workers."
@@ -110,22 +105,22 @@ output "worker_security_group_id" {
output "worker_iam_instance_profile_arns" {
description = "default IAM instance profile ARN for EKS worker groups"
- value = aws_iam_instance_profile.workers.*.arn
+ value = aws_iam_instance_profile.worker.*.arn
}
output "worker_iam_instance_profile_names" {
description = "default IAM instance profile name for EKS worker groups"
- value = aws_iam_instance_profile.workers.*.name
+ value = aws_iam_instance_profile.worker.*.name
}
output "worker_iam_role_name" {
description = "default IAM role name for EKS worker groups"
- value = try(aws_iam_role.workers[0].name, "")
+ value = try(aws_iam_role.worker[0].name, "")
}
output "worker_iam_role_arn" {
description = "default IAM role ARN for EKS worker groups"
- value = try(aws_iam_role.workers[0].arn, "")
+ value = try(aws_iam_role.worker[0].arn, "")
}
output "fargate_profile_ids" {
diff --git a/variables.tf b/variables.tf
index 7151f2a824..c9ef5c0241 100644
--- a/variables.tf
+++ b/variables.tf
@@ -198,6 +198,46 @@ variable "cluster_iam_role_tags" {
default = {}
}
+################################################################################
+# Workers IAM Role
+################################################################################
+
+variable "create_worker_iam_role" {
+ description = "Determines whether a worker IAM role is created or to use an existing IAM role"
+ type = bool
+ default = true
+}
+
+variable "worker_iam_role_name" {
+ description = "Name to use on worker role created"
+ type = string
+ default = null
+}
+
+variable "worker_iam_role_use_name_prefix" {
+ description = "Determines whether worker IAM role name (`worker_iam_role_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "worker_iam_role_path" {
+ description = "Worker IAM role path"
+ type = string
+ default = null
+}
+
+variable "worker_iam_role_permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the worker role"
+ type = string
+ default = null
+}
+
+variable "worker_iam_role_tags" {
+ description = "A map of additional tags to add to the worker IAM role created"
+ type = map(string)
+ default = {}
+}
+
################################################################################
# Fargate
################################################################################
@@ -274,7 +314,7 @@ variable "launch_templates" {
variable "worker_groups" {
description = "A map of maps defining worker group configurations to be defined using AWS Launch Template"
- type = map(any)
+ type = any
default = {}
}
@@ -326,7 +366,7 @@ variable "worker_sg_ingress_from_port" {
default = 1025
}
-variable "workers_additional_policies" {
+variable "worker_additional_policies" {
description = "Additional policies to be added to workers"
type = list(string)
default = []
@@ -419,7 +459,7 @@ variable "manage_worker_iam_resources" {
default = true
}
-variable "workers_role_name" {
+variable "worker_role_name" {
description = "User defined workers role name"
type = string
default = ""
@@ -449,7 +489,7 @@ variable "cluster_egress_cidrs" {
default = ["0.0.0.0/0"]
}
-variable "workers_egress_cidrs" {
+variable "worker_egress_cidrs" {
description = "List of CIDR blocks that are permitted for workers egress traffic"
type = list(string)
default = ["0.0.0.0/0"]
diff --git a/versions.tf b/versions.tf
index 9ed32587e6..4e574b1aa9 100644
--- a/versions.tf
+++ b/versions.tf
@@ -6,22 +6,10 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.56.0"
}
- local = {
- source = "hashicorp/local"
- version = ">= 1.4.0"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
cloudinit = {
source = "hashicorp/cloudinit"
version = ">= 2.0.0"
}
- http = {
- source = "terraform-aws-modules/http"
- version = ">= 2.4.1"
- }
tls = {
source = "hashicorp/tls"
version = ">= 2.2.0"
diff --git a/workers.tf b/workers.tf
index 142bf0ac08..b9cc3654bd 100644
--- a/workers.tf
+++ b/workers.tf
@@ -41,12 +41,15 @@ resource "aws_launch_template" "this" {
description = try(each.value.description, var.group_default_settings.description, null)
ebs_optimized = try(each.value.ebs_optimized, var.group_default_settings.ebs_optimized, null)
- image_id = try(each.value.image_id, var.group_default_settings.image_id, null)
+ image_id = try(each.value.image_id, var.group_default_settings.image_id, data.aws_ami.eks_worker[0].image_id)
instance_type = try(each.value.instance_type, var.group_default_settings.instance_type, "m6i.large")
key_name = try(each.value.key_name, var.group_default_settings.key_name, null)
user_data = try(each.value.user_data, var.group_default_settings.user_data, null)
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.group_default_settings.vpc_security_group_ids, null)
+ vpc_security_group_ids = compact(concat(
+ [try(aws_security_group.worker[0].id, "")],
+ try(each.value.vpc_security_group_ids, var.group_default_settings.vpc_security_group_ids, [])
+ ))
default_version = try(each.value.default_version, var.group_default_settings.default_version, null)
update_default_version = try(each.value.update_default_version, var.group_default_settings.update_default_version, null)
@@ -135,17 +138,12 @@ resource "aws_launch_template" "this" {
}
}
- # iam_instance_profile {
- # name = coalescelist(
- # aws_iam_instance_profile.workers.*.name,
- # data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.name,
- # )[count.index]
- # }
- # TODO - oy
dynamic "iam_instance_profile" {
- for_each = try([each.value.iam_instance_profile_name], [])
+ for_each = [{
+ "arn" = try(each.value.iam_instance_profile_arn, aws_iam_instance_profile.worker[0].arn, {})
+ }]
content {
- name = each.value
+ arn = lookup(iam_instance_profile.value, "arn", null)
}
}
@@ -223,53 +221,50 @@ resource "aws_launch_template" "this" {
}
}
- tag_specifications {
- resource_type = "volume"
- tags = merge(
- { "Name" = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}-eks_asg" },
- var.tags,
- { for tag in lookup(each.value, "tags", {}) : tag["key"] => tag["value"] if tag["key"] != "Name" && tag["propagate_at_launch"] }
- )
- }
+ # tag_specifications {
+ # resource_type = "volume"
+ # tags = merge(
+ # var.tags,
+ # lookup(each.value, "tags", {}),
+ # { "Name" = try(each.value.name, "${aws_eks_cluster.this[0].name}-${each.key}") }
+ # )
+ # }
- tag_specifications {
- resource_type = "instance"
- tags = merge(
- { "Name" = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}-eks_asg" },
- { for tag_key, tag_value in var.tags :
- tag_key => tag_value
- if tag_key != "Name" && !contains([for tag in lookup(each.value, "tags", {}) : tag["key"]], tag_key)
- }
- )
- }
+ # tag_specifications {
+ # resource_type = "instance"
+ # tags = merge(
+ # var.tags,
+ # lookup(each.value, "tags", {}),
+ # { "Name" = try(each.value.name, "${aws_eks_cluster.this[0].name}-${each.key}") }
+ # )
+ # }
- tag_specifications {
- resource_type = "network-interface"
- tags = merge(
- { "Name" = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}-eks_asg" },
- var.tags,
- { for tag in lookup(each.value, "tags", {}) : tag["key"] => tag["value"] if tag["key"] != "Name" && tag["propagate_at_launch"] }
- )
- }
+ # tag_specifications {
+ # resource_type = "network-interface"
+ # tags = merge(
+ # var.tags,
+ # lookup(each.value, "tags", {}),
+ # { "Name" = try(each.value.name, "${aws_eks_cluster.this[0].name}-${each.key}") }
+ # )
+ # }
# Prevent premature access of security group roles and policies by pods that
- # require permissions on create/destroy that depend on workers.
+  # require permissions on create/destroy that depend on the worker security group rules.
depends_on = [
- aws_security_group_rule.workers_egress_internet,
- aws_security_group_rule.workers_ingress_self,
- aws_security_group_rule.workers_ingress_cluster,
- aws_security_group_rule.workers_ingress_cluster_kubelet,
- aws_security_group_rule.workers_ingress_cluster_https,
- aws_security_group_rule.workers_ingress_cluster_primary,
- aws_security_group_rule.cluster_primary_ingress_workers,
- aws_iam_role_policy_attachment.workers_additional_policies
+ aws_security_group_rule.worker_egress_internet,
+ aws_security_group_rule.worker_ingress_self,
+ aws_security_group_rule.worker_ingress_cluster,
+ aws_security_group_rule.worker_ingress_cluster_kubelet,
+ aws_security_group_rule.worker_ingress_cluster_https,
+ aws_security_group_rule.worker_ingress_cluster_primary,
+ aws_security_group_rule.cluster_primary_ingress_worker,
]
lifecycle {
create_before_destroy = true
}
- tags = merge(var.tags, lookup(each.value, "tags", {}))
+ # tags = merge(var.tags, lookup(each.value, "tags", {}))
}
################################################################################
@@ -277,7 +272,7 @@ resource "aws_launch_template" "this" {
################################################################################
-# resource "aws_eks_node_group" "workers" {
+# resource "aws_eks_node_group" "worker" {
#   for_each = var.create ? var.node_groups : {}
# node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
@@ -285,7 +280,7 @@ resource "aws_launch_template" "this" {
# cluster_name = var.cluster_name
# node_role_arn = try(each.value.iam_role_arn, var.default_iam_role_arn)
-# subnet_ids = each.value["subnet_ids"]
+# subnet_ids = coalescelist(each.value["subnet_ids"], var.subnet_ids, [""])
# scaling_config {
# desired_size = each.value["desired_capacity"]
@@ -371,17 +366,23 @@ resource "aws_launch_template" "this" {
################################################################################
resource "aws_autoscaling_group" "this" {
- for_each = var.create ? var.worker_groups : {}
+  for_each = var.create ? var.worker_groups : {}
name_prefix = "${join("-", [aws_eks_cluster.this[0].name, try(each.value.name, each.key)])}-"
launch_template {
- name = each.value.launch_template_key # required
+ name = try(
+ aws_launch_template.this[each.value.launch_template_key].name,
+ each.value.launch_template_name,
+ # defaults should be last
+ aws_launch_template.this[var.group_default_settings.launch_template_key].name,
+ var.group_default_settings.launch_template_name,
+ )
version = try(each.value.launch_template_version, var.group_default_settings.launch_template_version, "$Latest")
}
availability_zones = try(each.value.availability_zones, var.group_default_settings.availability_zones, null)
- vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default_settings.vpc_zone_identifier, null)
+ vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default_settings.vpc_zone_identifier, var.subnet_ids)
  min_size = try(each.value.min_size, var.group_default_settings.min_size)
  max_size = try(each.value.max_size, var.group_default_settings.max_size)
@@ -398,7 +399,7 @@ resource "aws_autoscaling_group" "this" {
force_delete = try(each.value.force_delete, var.group_default_settings.force_delete, false)
termination_policies = try(each.value.termination_policies, var.group_default_settings.termination_policies, null)
- suspended_processes = try(each.value.suspended_processes, var.group_default_settings.suspended_processes, "AZRebalance")
+ suspended_processes = try(each.value.suspended_processes, var.group_default_settings.suspended_processes, ["AZRebalance"])
max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default_settings.max_instance_lifetime, null)
enabled_metrics = try(each.value.enabled_metrics, var.group_default_settings.enabled_metrics, null)
@@ -504,17 +505,21 @@ resource "aws_autoscaling_group" "this" {
"value" = "owned"
"propagate_at_launch" = true
},
+ {
+ "key" = "k8s.io/cluster/${aws_eks_cluster.this[0].name}"
+ "value" = "owned"
+ "propagate_at_launch" = true
+ },
],
[
- for tag_key, tag_value in var.tags :
+ for k, v in merge(var.tags, lookup(each.value, "tags", {})) :
tomap({
- key = tag_key
- value = tag_value
+ key = k
+ value = v
propagate_at_launch = true
})
- if tag_key != "Name" && !contains([for tag in lookup(each.value, "tags", []) : tag["key"]], tag_key)
],
- lookup(each.value, "tags", {})
+ lookup(each.value, "propogated_tags", [])
)
content {
key = tag.value.key
@@ -527,65 +532,66 @@ resource "aws_autoscaling_group" "this" {
create_before_destroy = true
ignore_changes = [desired_capacity]
}
+
+ depends_on = [
+ aws_launch_template.this
+ ]
}
################################################################################
# IAM Role & Instance Profile
################################################################################
-resource "aws_iam_role" "workers" {
- count = var.manage_worker_iam_resources && var.create ? 1 : 0
-
- name_prefix = var.workers_role_name != "" ? null : aws_eks_cluster.this[0].name
- name = var.workers_role_name != "" ? var.workers_role_name : null
- assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
- permissions_boundary = var.permissions_boundary
- path = var.iam_path
- force_detach_policies = true
-
- tags = var.tags
+locals {
+ worker_iam_role_name = coalesce(var.worker_iam_role_name, var.cluster_name)
}
-resource "aws_iam_instance_profile" "workers" {
- count = var.create && var.manage_worker_iam_resources ? 1 : 0
+resource "aws_iam_role" "worker" {
+ count = var.create && var.create_worker_iam_role ? 1 : 0
- name_prefix = aws_eks_cluster.this[0].name
- role = aws_iam_role.workers[0].id
- path = var.iam_path
+ name = var.worker_iam_role_use_name_prefix ? null : local.worker_iam_role_name
+ name_prefix = var.worker_iam_role_use_name_prefix ? try("${local.worker_iam_role_name}-", local.worker_iam_role_name) : null
+ path = var.worker_iam_role_path
- lifecycle {
- create_before_destroy = true
- }
+ assume_role_policy = data.aws_iam_policy_document.worker_assume_role_policy[0].json
+ permissions_boundary = var.worker_iam_role_permissions_boundary
+ managed_policy_arns = compact(distinct(concat([
+ "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy",
+ "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly",
+ var.attach_worker_cni_policy ? "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" : "",
+ ], var.worker_additional_policies)))
+ force_detach_policies = true
- tags = var.tags
+ tags = merge(var.tags, var.worker_iam_role_tags)
}
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
- count = var.create && var.manage_worker_iam_resources ? 1 : 0
+data "aws_iam_policy_document" "worker_assume_role_policy" {
+ count = var.create && var.create_worker_iam_role ? 1 : 0
- policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
- role = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
- count = var.create && var.manage_worker_iam_resources && var.attach_worker_cni_policy ? 1 : 0
+ statement {
+ sid = "EKSWorkerAssumeRole"
+ actions = ["sts:AssumeRole"]
- policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
- role = aws_iam_role.workers[0].name
+ principals {
+ type = "Service"
+ identifiers = [local.ec2_principal]
+ }
+ }
}
-resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
- count = var.create && var.manage_worker_iam_resources ? 1 : 0
+resource "aws_iam_instance_profile" "worker" {
+ count = var.create && var.create_worker_iam_role ? 1 : 0
- policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
- role = aws_iam_role.workers[0].name
-}
+ name = var.worker_iam_role_use_name_prefix ? null : local.worker_iam_role_name
+ name_prefix = var.worker_iam_role_use_name_prefix ? try("${local.worker_iam_role_name}-", local.worker_iam_role_name) : null
+ path = var.worker_iam_role_path
+ role = aws_iam_role.worker[0].id
-resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
- for_each = var.create && var.manage_worker_iam_resources ? toset(var.workers_additional_policies) : []
+ lifecycle {
+ create_before_destroy = true
+ }
- role = aws_iam_role.workers[0].name
- policy_arn = each.value
+ tags = merge(var.tags, var.worker_iam_role_tags)
}
################################################################################
@@ -596,7 +602,7 @@ locals {
create_worker_sg = var.create && var.worker_create_security_group
}
-resource "aws_security_group" "workers" {
+resource "aws_security_group" "worker" {
count = local.create_worker_sg ? 1 : 0
name_prefix = var.cluster_name
@@ -611,19 +617,19 @@ resource "aws_security_group" "workers" {
)
}
-resource "aws_security_group_rule" "workers_egress_internet" {
+resource "aws_security_group_rule" "worker_egress_internet" {
count = local.create_worker_sg ? 1 : 0
description = "Allow nodes all egress to the Internet."
protocol = "-1"
security_group_id = local.worker_security_group_id
- cidr_blocks = var.workers_egress_cidrs
+ cidr_blocks = var.worker_egress_cidrs
from_port = 0
to_port = 0
type = "egress"
}
-resource "aws_security_group_rule" "workers_ingress_self" {
+resource "aws_security_group_rule" "worker_ingress_self" {
count = local.create_worker_sg ? 1 : 0
description = "Allow node to communicate with each other."
@@ -635,10 +641,10 @@ resource "aws_security_group_rule" "workers_ingress_self" {
type = "ingress"
}
-resource "aws_security_group_rule" "workers_ingress_cluster" {
+resource "aws_security_group_rule" "worker_ingress_cluster" {
count = local.create_worker_sg ? 1 : 0
- description = "Allow workers pods to receive communication from the cluster control plane."
+ description = "Allow worker pods to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = local.worker_security_group_id
source_security_group_id = local.cluster_security_group_id
@@ -647,10 +653,10 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
type = "ingress"
}
-resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
+resource "aws_security_group_rule" "worker_ingress_cluster_kubelet" {
count = local.create_worker_sg ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
- description = "Allow workers Kubelets to receive communication from the cluster control plane."
+ description = "Allow worker Kubelets to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = local.worker_security_group_id
source_security_group_id = local.cluster_security_group_id
@@ -659,7 +665,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
type = "ingress"
}
-resource "aws_security_group_rule" "workers_ingress_cluster_https" {
+resource "aws_security_group_rule" "worker_ingress_cluster_https" {
count = local.create_worker_sg ? 1 : 0
description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
@@ -671,10 +677,10 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" {
type = "ingress"
}
-resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
+resource "aws_security_group_rule" "worker_ingress_cluster_primary" {
count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
- description = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
+ description = "Allow pods running on worker to receive communication from cluster primary security group (e.g. Fargate pods)."
protocol = "all"
security_group_id = local.worker_security_group_id
source_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
@@ -683,10 +689,10 @@ resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
type = "ingress"
}
-resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
+resource "aws_security_group_rule" "cluster_primary_ingress_worker" {
count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
- description = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
+ description = "Allow pods running on worker to send communication to cluster primary security group (e.g. Fargate pods)."
protocol = "all"
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
source_security_group_id = local.worker_security_group_id
From 7b9e968c391ea1f89decf1d9b8de7572be86aafa Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 9 Nov 2021 13:40:07 -0500
Subject: [PATCH 11/83] chore: back to sub-modules we shall go!
---
modules/eks-managed-node-group/README.md | 113 +++++
.../eks-managed-node-group/launch_template.tf | 146 ++++++
modules/eks-managed-node-group/locals.tf | 51 ++
modules/eks-managed-node-group/main.tf | 105 ++++
modules/eks-managed-node-group/outputs.tf | 14 +
.../templates/userdata.sh.tpl | 34 ++
modules/eks-managed-node-group/variables.tf | 71 +++
modules/eks-managed-node-group/versions.tf | 8 +
modules/self-managed-node-group/README.md | 54 +++
modules/self-managed-node-group/main.tf | 380 +++++++++++++++
modules/self-managed-node-group/outputs.tf | 96 ++++
modules/self-managed-node-group/variables.tf | 449 ++++++++++++++++++
modules/self-managed-node-group/versions.tf | 15 +
13 files changed, 1536 insertions(+)
create mode 100644 modules/eks-managed-node-group/README.md
create mode 100644 modules/eks-managed-node-group/launch_template.tf
create mode 100644 modules/eks-managed-node-group/locals.tf
create mode 100644 modules/eks-managed-node-group/main.tf
create mode 100644 modules/eks-managed-node-group/outputs.tf
create mode 100644 modules/eks-managed-node-group/templates/userdata.sh.tpl
create mode 100644 modules/eks-managed-node-group/variables.tf
create mode 100644 modules/eks-managed-node-group/versions.tf
create mode 100644 modules/self-managed-node-group/README.md
create mode 100644 modules/self-managed-node-group/main.tf
create mode 100644 modules/self-managed-node-group/outputs.tf
create mode 100644 modules/self-managed-node-group/variables.tf
create mode 100644 modules/self-managed-node-group/versions.tf
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
new file mode 100644
index 0000000000..cbfaabc97f
--- /dev/null
+++ b/modules/eks-managed-node-group/README.md
@@ -0,0 +1,113 @@
+# EKS Managed Node Group Module
+
+Helper submodule to create and manage resources related to `eks_node_groups`.
+
+## Node Groups' IAM Role
+
+The role ARN specified in `var.default_iam_role_arn` will be used by default. In a simple configuration this will be the worker role created by the parent module.
+
+`iam_role_arn` must be specified in either `var.node_groups_defaults` or `var.node_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent.
+
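+As a minimal sketch (the submodule path, cluster name, and role ARN are placeholders, not values from this repo), an existing role can be supplied through `node_groups_defaults`:
+
+```hcl
+module "node_groups" {
+  source = "./modules/eks-managed-node-group" # placeholder path
+
+  cluster_name = "my-cluster" # placeholder
+
+  # Assumption: the parent did not create a worker role (e.g.
+  # `manage_worker_iam_resources = false`), so every node group
+  # falls back to this existing role ARN.
+  node_groups_defaults = {
+    iam_role_arn = "arn:aws:iam::111122223333:role/eks-worker" # placeholder
+  }
+
+  node_groups = {
+    default = {}
+  }
+}
+```
+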
+## `node_groups` and `node_groups_defaults` keys
+`node_groups_defaults` is a map that can take the below keys. Values will be used if not specified in individual node groups.
+
+`node_groups` is a map of maps. The first-level key is used as the unique value for `for_each` resources and in the `aws_eks_node_group` name. The inner map can take the values below; a short example follows the table.
+
+| Name | Description | Type | If unset |
+|------|-------------|:----:|:-----:|
+| additional\_tags | Additional tags to apply to node group | map(string) | Only `var.tags` applied |
+| ami\_release\_version | AMI version of workers | string | Provider default behavior |
+| ami\_type | AMI Type. See Terraform or AWS docs | string | Provider default behavior |
+| ami\_id | ID of custom AMI. If you use a custom AMI, you need to set `ami_is_eks_optimized` | string | Provider default behavior |
+| ami\_is\_eks\_optimized | If the custom AMI is an EKS-optimized image, ignored if `ami_id` is not set. If this is `true` then `bootstrap.sh` is called automatically (max pod logic needs to be set manually); if this is `false` you need to provide all the node configuration in `pre_userdata` | bool | `true` |
+| capacity\_type | Type of instance capacity to provision. Options are `ON_DEMAND` and `SPOT` | string | Provider default behavior |
+| create_launch_template | Create and use a default launch template | bool | `false` |
+| desired\_capacity | Desired number of workers | number | `var.workers_group_defaults[asg_desired_capacity]` |
+| disk\_encrypted | Whether the root disk will be encrypted. Requires `create_launch_template` to be `true` and `disk_kms_key_id` to be set | bool | false |
+| disk\_kms\_key\_id | KMS Key used to encrypt the root disk. Requires both `create_launch_template` and `disk_encrypted` to be `true` | string | "" |
+| disk\_size | Workers' disk size | number | Provider default behavior |
+| disk\_type | Workers' disk type. Requires `create_launch_template` to be `true` | string | Provider default behavior |
+| disk\_throughput | Workers' disk throughput. Requires `create_launch_template` to be `true` and `disk_type` to be `gp3` | number | Provider default behavior |
+| disk\_iops | Workers' disk IOPS. Requires `create_launch_template` to be `true` and `disk_type` to be `gp3` | number | Provider default behavior |
+| ebs\_optimized | Enables/disables EBS optimization. Requires `create_launch_template` to be `true` | bool | `true` if the defined `instance\_types` are not present in `var.ebs\_optimized\_not\_supported` |
+| enable_monitoring | Enables/disables detailed monitoring. Requires `create_launch_template` to be `true` | bool | `true` |
+| eni_delete | Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying) | bool | `true` |
+| force\_update\_version | Force version update if existing pods are unable to be drained due to a pod disruption budget issue. | bool | Provider default behavior |
+| iam\_role\_arn | IAM role ARN for workers | string | `var.default_iam_role_arn` |
+| instance\_types | Node group's instance type(s). Multiple types can be specified when `capacity_type="SPOT"`. | list | `[var.workers_group_defaults[instance_type]]` |
+| k8s\_labels | Kubernetes labels | map(string) | No labels applied |
+| key\_name | Key name for workers. Set to empty string to disable remote access | string | `var.workers_group_defaults[key_name]` |
+| bootstrap_env | Provide environment variables to customize [bootstrap.sh](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh). Requires `create_launch_template` to be `true` | map(string) | `{}` |
+| kubelet_extra_args | Extra arguments for kubelet; this is automatically merged with `labels`. Requires `create_launch_template` to be `true` | string | "" |
+| launch_template_id | The ID of an aws_launch_template to use | string | No LT used |
+| launch\_template_version | The version of the LT to use | string | none |
+| max\_capacity | Max number of workers | number | `var.workers_group_defaults[asg_max_size]` |
+| min\_capacity | Min number of workers | number | `var.workers_group_defaults[asg_min_size]` |
+| update_config.max\_unavailable\_percentage | Max percentage of unavailable nodes during update. (e.g. 25, 50, etc) | number | `null` if `update_config.max_unavailable` is set |
+| update_config.max\_unavailable | Max number of unavailable nodes during update | number | `null` if `update_config.max_unavailable_percentage` is set |
+| name | Name of the node group. If you don't really need this, we recommend using `name_prefix` instead | string | Will use the autogenerated name prefix |
+| name_prefix | Name prefix of the node group | string | Auto generated |
+| pre_userdata | Userdata to prepend to the default userdata. Requires `create_launch_template` to be `true` | string | "" |
+| public_ip | Associate a public IP address with a worker. Requires `create_launch_template` to be `true` | string | `false` |
+| source\_security\_group\_ids | Source security groups for remote access to workers | list(string) | If key\_name is specified: THE REMOTE ACCESS WILL BE OPENED TO THE WORLD |
+| subnets | Subnets to contain workers | list(string) | `var.workers_group_defaults[subnets]` |
+| version | Kubernetes version | string | Provider default behavior |
+| taints | Kubernetes node taints | list(map) | empty |
+| timeouts | A map of timeouts for create/update/delete operations. | `map(string)` | Provider default behavior |
+| update_default_version | Whether or not to set the new launch template version as the default | bool | `true` |
+| metadata_http_endpoint | The state of the instance metadata service. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_endpoint]` |
+| metadata_http_tokens | If session tokens are required. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_tokens]` |
+| metadata_http_put_response_hop_limit | The desired HTTP PUT response hop limit for instance metadata requests. Requires `create_launch_template` to be `true` | number | `var.workers_group_defaults[metadata_http_put_response_hop_limit]` |
+
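+As an illustrative sketch (the group name, sizes, instance type, and labels below are made up, not module defaults), a `node_groups` map combining several of the keys above might look like:
+
+```hcl
+node_groups = {
+  example = {
+    name_prefix      = "example"
+    desired_capacity = 2
+    min_capacity     = 1
+    max_capacity     = 4
+    instance_types   = ["t3.large"]
+
+    # These keys require create_launch_template = true (see table above)
+    create_launch_template = true
+    disk_size              = 50
+    kubelet_extra_args     = "--max-pods=50"
+
+    k8s_labels = {
+      workload = "general"
+    }
+  }
+}
+```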
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.56.0 |
+| [cloudinit](#provider\_cloudinit) | >= 2.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
+| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of parent cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of parent cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
+| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
+| [ebs\_optimized\_not\_supported](#input\_ebs\_optimized\_not\_supported) | List of instance types that do not support EBS optimization | `list(string)` | `[]` | no |
+| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
+| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of default values to apply to all node groups. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
+| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
+| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | `{}` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
+| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
+
diff --git a/modules/eks-managed-node-group/launch_template.tf b/modules/eks-managed-node-group/launch_template.tf
new file mode 100644
index 0000000000..6abe358d5a
--- /dev/null
+++ b/modules/eks-managed-node-group/launch_template.tf
@@ -0,0 +1,146 @@
+data "cloudinit_config" "workers_userdata" {
+ for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
+
+ gzip = false
+ base64_encode = true
+ boundary = "//"
+
+ part {
+ content_type = "text/x-shellscript"
+ content = templatefile("${path.module}/templates/userdata.sh.tpl",
+ {
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ ami_id = lookup(each.value, "ami_id", "")
+ ami_is_eks_optimized = each.value["ami_is_eks_optimized"]
+ bootstrap_env = each.value["bootstrap_env"]
+ kubelet_extra_args = each.value["kubelet_extra_args"]
+ pre_userdata = each.value["pre_userdata"]
+ capacity_type = lookup(each.value, "capacity_type", "ON_DEMAND")
+ append_labels = length(lookup(each.value, "k8s_labels", {})) > 0 ? ",${join(",", [for k, v in lookup(each.value, "k8s_labels", {}) : "${k}=${v}"])}" : ""
+ }
+ )
+ }
+}
+
+# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
+# There are several more options one could set, but you probably don't need to modify them.
+# You can take the default and add your custom AMI and/or custom tags.
+#
+# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy for the node group. If you DON'T use a custom AMI,
+# then the default user-data for bootstrapping a cluster is merged into the copy.
+resource "aws_launch_template" "workers" {
+ for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
+
+ name_prefix = local.node_groups_names[each.key]
+ description = format("EKS Managed Node Group custom LT for %s", local.node_groups_names[each.key])
+ update_default_version = lookup(each.value, "update_default_version", true)
+
+ block_device_mappings {
+ device_name = "/dev/xvda"
+
+ ebs {
+ volume_size = lookup(each.value, "disk_size", null)
+ volume_type = lookup(each.value, "disk_type", null)
+ iops = lookup(each.value, "disk_iops", null)
+ throughput = lookup(each.value, "disk_throughput", null)
+ encrypted = lookup(each.value, "disk_encrypted", null)
+ kms_key_id = lookup(each.value, "disk_kms_key_id", null)
+ delete_on_termination = true
+ }
+ }
+
+ ebs_optimized = lookup(each.value, "ebs_optimized", !contains(var.ebs_optimized_not_supported, element(each.value.instance_types, 0)))
+
+ instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
+
+ monitoring {
+ enabled = lookup(each.value, "enable_monitoring", null)
+ }
+
+ network_interfaces {
+ associate_public_ip_address = lookup(each.value, "public_ip", null)
+ delete_on_termination = lookup(each.value, "eni_delete", null)
+ security_groups = compact(flatten([
+ var.worker_security_group_id,
+ var.worker_additional_security_group_ids,
+ lookup(
+ each.value,
+ "additional_security_group_ids",
+ null,
+ ),
+ ]))
+ }
+
+  # If you want to use a custom AMI
+ image_id = lookup(each.value, "ami_id", null)
+
+  # If you use a custom AMI, you need to supply the bootstrap script via user-data, as EKS DOESN'T merge its managed user-data in that case.
+  # You can add more than the minimum code you see in the template, e.g. install the SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
+ #
+ # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
+
+ user_data = data.cloudinit_config.workers_userdata[each.key].rendered
+
+ key_name = lookup(each.value, "key_name", null)
+
+ metadata_options {
+ http_endpoint = lookup(each.value, "metadata_http_endpoint", null)
+ http_tokens = lookup(each.value, "metadata_http_tokens", null)
+ http_put_response_hop_limit = lookup(each.value, "metadata_http_put_response_hop_limit", null)
+ }
+
+ # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
+ tag_specifications {
+ resource_type = "instance"
+
+ tags = merge(
+ var.tags,
+ {
+ Name = local.node_groups_names[each.key]
+ },
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {})
+ )
+ }
+
+  # Supplying custom tags to EKS instances' root volumes is another use-case for LaunchTemplates. (This doesn't tag volumes dynamically provisioned via PVCs, though.)
+ tag_specifications {
+ resource_type = "volume"
+
+ tags = merge(
+ var.tags,
+ {
+ Name = local.node_groups_names[each.key]
+ },
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {})
+ )
+ }
+
+  # Supplying custom tags to EKS instances' ENIs is another use-case for LaunchTemplates
+ tag_specifications {
+ resource_type = "network-interface"
+
+ tags = merge(
+ var.tags,
+ {
+ Name = local.node_groups_names[each.key]
+ },
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {})
+ )
+ }
+
+ # Tag the LT itself
+ tags = merge(
+ var.tags,
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {}),
+ )
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
diff --git a/modules/eks-managed-node-group/locals.tf b/modules/eks-managed-node-group/locals.tf
new file mode 100644
index 0000000000..0a6c7cbffb
--- /dev/null
+++ b/modules/eks-managed-node-group/locals.tf
@@ -0,0 +1,51 @@
+locals {
+ # Merge defaults and per-group values to make code cleaner
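+  # Precedence is left to right in merge(): the hardcoded defaults below are
+  # overridden by var.node_groups_defaults, which in turn is overridden by the
+  # per-group values from var.node_groups.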
+ node_groups_expanded = { for k, v in var.node_groups : k => merge(
+ {
+ desired_capacity = var.workers_group_defaults["asg_desired_capacity"]
+ iam_role_arn = var.default_iam_role_arn
+ instance_types = [var.workers_group_defaults["instance_type"]]
+ key_name = var.workers_group_defaults["key_name"]
+ launch_template_id = var.workers_group_defaults["launch_template_id"]
+ launch_template_version = var.workers_group_defaults["launch_template_version"]
+ set_instance_types_on_lt = false
+ max_capacity = var.workers_group_defaults["asg_max_size"]
+ min_capacity = var.workers_group_defaults["asg_min_size"]
+ subnets = var.workers_group_defaults["subnets"]
+ create_launch_template = false
+ bootstrap_env = {}
+ kubelet_extra_args = var.workers_group_defaults["kubelet_extra_args"]
+ disk_size = var.workers_group_defaults["root_volume_size"]
+ disk_type = var.workers_group_defaults["root_volume_type"]
+ disk_iops = var.workers_group_defaults["root_iops"]
+ disk_throughput = var.workers_group_defaults["root_volume_throughput"]
+ disk_encrypted = var.workers_group_defaults["root_encrypted"]
+ disk_kms_key_id = var.workers_group_defaults["root_kms_key_id"]
+ enable_monitoring = var.workers_group_defaults["enable_monitoring"]
+ eni_delete = var.workers_group_defaults["eni_delete"]
+ public_ip = var.workers_group_defaults["public_ip"]
+ pre_userdata = var.workers_group_defaults["pre_userdata"]
+ additional_security_group_ids = var.workers_group_defaults["additional_security_group_ids"]
+ taints = []
+ timeouts = var.workers_group_defaults["timeouts"]
+ update_default_version = true
+ ebs_optimized = null
+ metadata_http_endpoint = var.workers_group_defaults["metadata_http_endpoint"]
+ metadata_http_tokens = var.workers_group_defaults["metadata_http_tokens"]
+ metadata_http_put_response_hop_limit = var.workers_group_defaults["metadata_http_put_response_hop_limit"]
+ ami_is_eks_optimized = true
+ },
+ var.node_groups_defaults,
+ v,
+ ) if var.create_eks }
+
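+  # Group naming: an explicit `name` wins, then `name_prefix`; otherwise
+  # "<cluster_name>-<group key>" is used as the generated prefix.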
+ node_groups_names = { for k, v in local.node_groups_expanded : k => lookup(
+ v,
+ "name",
+ lookup(
+ v,
+ "name_prefix",
+ join("-", [var.cluster_name, k])
+ )
+ ) }
+}
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
new file mode 100644
index 0000000000..75e6209730
--- /dev/null
+++ b/modules/eks-managed-node-group/main.tf
@@ -0,0 +1,105 @@
+resource "aws_eks_node_group" "workers" {
+ for_each = local.node_groups_expanded
+
+ node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
+ node_group_name = lookup(each.value, "name", null)
+
+ cluster_name = var.cluster_name
+ node_role_arn = each.value["iam_role_arn"]
+ subnet_ids = each.value["subnets"]
+
+ scaling_config {
+ desired_size = each.value["desired_capacity"]
+ max_size = each.value["max_capacity"]
+ min_size = each.value["min_capacity"]
+ }
+
+ ami_type = lookup(each.value, "ami_type", null)
+ disk_size = each.value["launch_template_id"] != null || each.value["create_launch_template"] ? null : lookup(each.value, "disk_size", null)
+ instance_types = !each.value["set_instance_types_on_lt"] ? each.value["instance_types"] : null
+ release_version = lookup(each.value, "ami_release_version", null)
+ capacity_type = lookup(each.value, "capacity_type", null)
+ force_update_version = lookup(each.value, "force_update_version", null)
+
+ dynamic "remote_access" {
+ for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
+ ec2_ssh_key = each.value["key_name"]
+ source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
+ }] : []
+
+ content {
+ ec2_ssh_key = remote_access.value["ec2_ssh_key"]
+ source_security_group_ids = remote_access.value["source_security_group_ids"]
+ }
+ }
+
+ dynamic "launch_template" {
+ for_each = each.value["launch_template_id"] != null ? [{
+ id = each.value["launch_template_id"]
+ version = each.value["launch_template_version"]
+ }] : []
+
+ content {
+ id = launch_template.value["id"]
+ version = launch_template.value["version"]
+ }
+ }
+
+ dynamic "launch_template" {
+ for_each = each.value["launch_template_id"] == null && each.value["create_launch_template"] ? [{
+ id = aws_launch_template.workers[each.key].id
+ version = each.value["launch_template_version"] == "$Latest" ? aws_launch_template.workers[each.key].latest_version : (
+ each.value["launch_template_version"] == "$Default" ? aws_launch_template.workers[each.key].default_version : each.value["launch_template_version"]
+ )
+ }] : []
+
+ content {
+ id = launch_template.value["id"]
+ version = launch_template.value["version"]
+ }
+ }
+
+ dynamic "taint" {
+ for_each = each.value["taints"]
+
+ content {
+ key = taint.value["key"]
+ value = taint.value["value"]
+ effect = taint.value["effect"]
+ }
+ }
+
+ dynamic "update_config" {
+ for_each = try(each.value.update_config.max_unavailable_percentage > 0, each.value.update_config.max_unavailable > 0, false) ? [true] : []
+
+ content {
+ max_unavailable_percentage = try(each.value.update_config.max_unavailable_percentage, null)
+ max_unavailable = try(each.value.update_config.max_unavailable, null)
+ }
+ }
+
+ timeouts {
+ create = lookup(each.value["timeouts"], "create", null)
+ update = lookup(each.value["timeouts"], "update", null)
+ delete = lookup(each.value["timeouts"], "delete", null)
+ }
+
+ version = lookup(each.value, "version", null)
+
+ labels = merge(
+ lookup(var.node_groups_defaults, "k8s_labels", {}),
+ lookup(var.node_groups[each.key], "k8s_labels", {})
+ )
+
+ tags = merge(
+ var.tags,
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {}),
+ )
+
+ lifecycle {
+ create_before_destroy = true
+ ignore_changes = [scaling_config[0].desired_size]
+ }
+
+}
diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..ad148ea514
--- /dev/null
+++ b/modules/eks-managed-node-group/outputs.tf
@@ -0,0 +1,14 @@
+output "node_groups" {
+ description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
+ value = aws_eks_node_group.workers
+}
+
+output "aws_auth_roles" {
+ description = "Roles for use in aws-auth ConfigMap"
+ value = [
+ for k, v in local.node_groups_expanded : {
+ worker_role_arn = lookup(v, "iam_role_arn", var.default_iam_role_arn)
+ platform = "linux"
+ }
+ ]
+}
diff --git a/modules/eks-managed-node-group/templates/userdata.sh.tpl b/modules/eks-managed-node-group/templates/userdata.sh.tpl
new file mode 100644
index 0000000000..321c17b427
--- /dev/null
+++ b/modules/eks-managed-node-group/templates/userdata.sh.tpl
@@ -0,0 +1,34 @@
+#!/bin/bash -e
+%{ if length(ami_id) == 0 ~}
+
+# Set bootstrap env
+printf '#!/bin/bash
+%{ for k, v in bootstrap_env ~}
+export ${k}="${v}"
+%{ endfor ~}
+export ADDITIONAL_KUBELET_EXTRA_ARGS="${kubelet_extra_args}"
+' > /etc/profile.d/eks-bootstrap-env.sh
+
+# Source extra environment variables in bootstrap script
+sed -i '/^set -o errexit/a\\nsource /etc/profile.d/eks-bootstrap-env.sh' /etc/eks/bootstrap.sh
+
+# Merge ADDITIONAL_KUBELET_EXTRA_ARGS into KUBELET_EXTRA_ARGS
+sed -i 's/^KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-}/KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-} $${ADDITIONAL_KUBELET_EXTRA_ARGS}/' /etc/eks/bootstrap.sh
+%{else ~}
+
+# Set variables for custom AMI
+API_SERVER_URL=${cluster_endpoint}
+B64_CLUSTER_CA=${cluster_auth_base64}
+%{ for k, v in bootstrap_env ~}
+${k}="${v}"
+%{ endfor ~}
+KUBELET_EXTRA_ARGS='--node-labels=eks.amazonaws.com/nodegroup-image=${ami_id},eks.amazonaws.com/capacityType=${capacity_type}${append_labels} ${kubelet_extra_args}'
+%{endif ~}
+
+# User supplied pre userdata
+${pre_userdata}
+%{ if length(ami_id) > 0 && ami_is_eks_optimized ~}
+
+# Call bootstrap for EKS optimised custom AMI
+/etc/eks/bootstrap.sh ${cluster_name} --apiserver-endpoint "$${API_SERVER_URL}" --b64-cluster-ca "$${B64_CLUSTER_CA}" --kubelet-extra-args "$${KUBELET_EXTRA_ARGS}"
+%{ endif ~}
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
new file mode 100644
index 0000000000..1aa8cfe26d
--- /dev/null
+++ b/modules/eks-managed-node-group/variables.tf
@@ -0,0 +1,71 @@
+variable "create_eks" {
+ description = "Controls if EKS resources should be created (it affects almost all resources)"
+ type = bool
+ default = true
+}
+
+variable "cluster_name" {
+ description = "Name of parent cluster"
+ type = string
+ default = ""
+}
+
+variable "cluster_endpoint" {
+ description = "Endpoint of parent cluster"
+ type = string
+ default = ""
+}
+
+variable "cluster_auth_base64" {
+ description = "Base64 encoded CA of parent cluster"
+ type = string
+ default = ""
+}
+
+variable "default_iam_role_arn" {
+ description = "ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults`"
+ type = string
+ default = ""
+}
+
+variable "workers_group_defaults" {
+ description = "Workers group defaults from parent"
+ type = any
+ default = {}
+}
+
+variable "worker_security_group_id" {
+ description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
+ type = string
+ default = ""
+}
+
+variable "worker_additional_security_group_ids" {
+ description = "A list of additional security group ids to attach to worker instances"
+ type = list(string)
+ default = []
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
+
+variable "node_groups_defaults" {
+ description = "map of maps of node groups to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
+ type = any
+ default = {}
+}
+
+variable "node_groups" {
+ description = "Map of maps of `eks_node_groups` to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
+ type = any
+ default = {}
+}
+
+variable "ebs_optimized_not_supported" {
+ description = "List of instance types that do not support EBS optimization"
+ type = list(string)
+ default = []
+}
diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf
new file mode 100644
index 0000000000..5324b482ab
--- /dev/null
+++ b/modules/eks-managed-node-group/versions.tf
@@ -0,0 +1,8 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = ">= 3.56.0"
+ cloudinit = ">= 2.0"
+ }
+}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
new file mode 100644
index 0000000000..6e99610bba
--- /dev/null
+++ b/modules/self-managed-node-group/README.md
@@ -0,0 +1,54 @@
+# Self Managed Node Group Module
+
+
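+## Example
+
+A minimal usage sketch (illustrative only; the source path, AMI ID, and subnet IDs below are placeholders):
+
+```hcl
+module "self_managed_node_group" {
+  source = "./modules/self-managed-node-group"
+
+  name             = "example-workers"
+  min_size         = 1
+  max_size         = 3
+  desired_capacity = 1
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a"]
+
+  # Create a launch template managed by this module
+  create_launch_template = true
+  launch_template_name   = "example-workers"
+  image_id               = "ami-0123456789abcdef0" # placeholder
+  instance_type          = "m5.large"
+
+  tags_as_map = {
+    Environment = "test"
+  }
+}
+```
+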
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.53 |
+| [null](#requirement\_null) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.53 |
+| [null](#provider\_null) | >= 2.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
+| [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [null_resource.tags_as_list_of_maps](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Conflicts with `subnet_ids` | `list(string)` | `null` | no |
+| [block\_device\_mappings](#input\_block\_device\_mappings) | (LT) Specify volumes to attach to the instance besides the volumes specified by the AMI | `list(any)` | `[]` | no |
+| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
+| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | (LT) Targeting for EC2 capacity reservations | `any` | `null` | no |
+| [cpu\_options](#input\_cpu\_options) | (LT) The CPU options for the instance | `map(string)` | `null` | no |
+| [create\_asg](#input\_create\_asg) | Determines whether to create the autoscaling group or not | `bool` | `true` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not | `bool` | `false` | no |
+| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedules or not | `bool` | `true` | no |
+| [credit\_specification](#input\_credit\_specification) | (LT) Customize the credit specification of the instance | `map(string)` | `null` | no |
+| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another can start | `number` | `null` | no |
+| [default\_version](#input\_default\_version) | (LT) Default version of the launch template | `string` | `null` | no |
+| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
+| [description](#input\_description) | (LT) Description of the launch template | `string` | `null` | no |
+| [desired\_capacity](#input\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `null` | no |
+| [disable\_api\_termination](#input\_disable\_api\_termination) | (LT) If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
+| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | (LT) The elastic GPU to attach to the instance | `map(string)` | `null` | no |
+| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | (LT) Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
+| [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect | `list(string)` | `null` | no |
+| [enclave\_options](#input\_enclave\_options) | (LT) Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
+| [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate | `bool` | `null` | no |
+| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
+| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
+| [hibernation\_options](#input\_hibernation\_options) | (LT) The hibernation options for the instance | `map(string)` | `null` | no |
+| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | (LT) The IAM Instance Profile ARN to launch the instance with | `string` | `null` | no |
+| [iam\_instance\_profile\_name](#input\_iam\_instance\_profile\_name) | The name attribute of the IAM instance profile to associate with launched instances | `string` | `null` | no |
+| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `""` | no |
+| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched | `list(map(string))` | `[]` | no |
+| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | (LT) Shutdown behavior for the instance. Can be `stop` or `terminate` | `string` | `null` | no |
+| [instance\_market\_options](#input\_instance\_market\_options) | (LT) The market (purchasing) option for the instance | `any` | `null` | no |
+| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | `null` | no |
+| [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no |
+| [kernel\_id](#input\_kernel\_id) | (LT) The kernel ID | `string` | `null` | no |
+| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [launch\_template](#input\_launch\_template) | Name of an existing launch template to be used (created outside of this module) | `string` | `null` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `""` | no |
+| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with it | `bool` | `true` | no |
+| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
+| [license\_specifications](#input\_license\_specifications) | (LT) A list of license specifications to associate with | `map(string)` | `null` | no |
+| [load\_balancers](#input\_load\_balancers) | A list of elastic load balancer names to add to the autoscaling group. Only valid for classic load balancers; for ALBs, use `target_group_arns` instead | `list(string)` | `[]` | no |
+| [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service | `number` | `null` | no |
+| [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `null` | no |
+| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | `null` | no |
+| [metrics\_granularity](#input\_metrics\_granularity) | The granularity to associate with the metrics to collect. The only valid value is `1Minute` | `string` | `null` | no |
+| [min\_elb\_capacity](#input\_min\_elb\_capacity) | Causes Terraform to wait for this number of instances to show up healthy in the ELB on creation only | `number` | `null` | no |
+| [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `null` | no |
+| [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Configuration block containing settings to define launch targets for Auto Scaling groups | `any` | `null` | no |
+| [name](#input\_name) | Name used across the resources created | `string` | n/a | yes |
+| [network\_interfaces](#input\_network\_interfaces) | (LT) Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
+| [placement](#input\_placement) | (LT) The placement of the instance | `map(string)` | `null` | no |
+| [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no |
+| [propagate\_name](#input\_propagate\_name) | Determines whether to propagate the ASG Name tag or not | `bool` | `true` | no |
+| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection for instances in the group during scale in events | `bool` | `false` | no |
+| [ram\_disk\_id](#input\_ram\_disk\_id) | (LT) The ID of the ram disk | `string` | `null` | no |
+| [schedules](#input\_schedules) | Map of autoscaling group schedules to create | `map(any)` | `{}` | no |
+| [security\_groups](#input\_security\_groups) | A list of security group IDs to associate | `list(string)` | `null` | no |
+| [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Conflicts with `availability_zones` | `list(string)` | `null` | no |
+| [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group | `list(string)` | `null` | no |
+| [tag\_specifications](#input\_tag\_specifications) | (LT) The tags to apply to the resources during launch | `list(any)` | `[]` | no |
+| [tags](#input\_tags) | A list of tag blocks. Each element should have keys named `key`, `value`, and `propagate_at_launch` | `list(map(string))` | `[]` | no |
+| [tags\_as\_map](#input\_tags\_as\_map) | A map of tags and values in the same format as other resources accept | `map(string)` | `{}` | no |
+| [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing | `list(string)` | `[]` | no |
+| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated | `list(string)` | `null` | no |
+| [update\_default\_version](#input\_update\_default\_version) | (LT) Whether to update the default version on each update. Conflicts with `default_version` | `string` | `null` | no |
+| [use\_launch\_template](#input\_use\_launch\_template) | Determines whether to use a launch template in the autoscaling group or not | `bool` | `false` | no |
+| [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
+| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with `name` as the prefix | `bool` | `true` | no |
+| [user\_data\_base64](#input\_user\_data\_base64) | The Base64-encoded user data to provide when launching the instance. Use this for launch templates instead of `user_data` | `string` | `null` | no |
+| [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out | `string` | `null` | no |
+| [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Causes Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations | `number` | `null` | no |
+| [warm\_pool](#input\_warm\_pool) | If this block is configured, add a Warm Pool to the specified Auto Scaling group | `any` | `null` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this AutoScaling Group |
+| [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscale group |
+| [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
+| [autoscaling\_group\_desired\_capacity](#output\_autoscaling\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
+| [autoscaling\_group\_health\_check\_grace\_period](#output\_autoscaling\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
+| [autoscaling\_group\_health\_check\_type](#output\_autoscaling\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
+| [autoscaling\_group\_id](#output\_autoscaling\_group\_id) | The autoscaling group id |
+| [autoscaling\_group\_load\_balancers](#output\_autoscaling\_group\_load\_balancers) | The load balancer names associated with the autoscaling group |
+| [autoscaling\_group\_max\_size](#output\_autoscaling\_group\_max\_size) | The maximum size of the autoscale group |
+| [autoscaling\_group\_min\_size](#output\_autoscaling\_group\_min\_size) | The minimum size of the autoscale group |
+| [autoscaling\_group\_name](#output\_autoscaling\_group\_name) | The autoscaling group name |
+| [autoscaling\_group\_target\_group\_arns](#output\_autoscaling\_group\_target\_group\_arns) | List of Target Group ARNs that apply to this AutoScaling Group |
+| [autoscaling\_group\_vpc\_zone\_identifier](#output\_autoscaling\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
+| [autoscaling\_schedule\_arns](#output\_autoscaling\_schedule\_arns) | ARNs of autoscaling group schedules |
+| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
+| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
+| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
+
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
new file mode 100644
index 0000000000..636ead8735
--- /dev/null
+++ b/modules/self-managed-node-group/main.tf
@@ -0,0 +1,380 @@
+locals {
+  launch_template_name    = var.create_launch_template ? aws_launch_template.this[0].name : var.launch_template
+ launch_template_version = var.create_launch_template && var.launch_template_version == null ? aws_launch_template.this[0].latest_version : var.launch_template_version
+
+ tags = concat(
+ [
+ {
+ key = "Name"
+ value = var.name
+ propagate_at_launch = var.propagate_name
+ },
+ ],
+ var.tags,
+ null_resource.tags_as_list_of_maps.*.triggers,
+ )
+}
+
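+# The autoscaling group `tags` argument expects a list of maps with `key`,
+# `value`, and `propagate_at_launch` keys; this null_resource converts the
+# plain `tags_as_map` input into that shape via its triggers.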
+resource "null_resource" "tags_as_list_of_maps" {
+ count = length(keys(var.tags_as_map))
+
+ triggers = {
+ key = keys(var.tags_as_map)[count.index]
+ value = values(var.tags_as_map)[count.index]
+ propagate_at_launch = true
+ }
+}
+
+################################################################################
+# Launch template
+################################################################################
+
+resource "aws_launch_template" "this" {
+ count = var.create_launch_template ? 1 : 0
+
+ name = var.launch_template_use_name_prefix ? null : local.launch_template_name
+ name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null
+ description = var.description
+
+ ebs_optimized = var.ebs_optimized
+ image_id = var.image_id
+ instance_type = var.instance_type
+ key_name = var.key_name
+ user_data = var.user_data_base64
+
+  vpc_security_group_ids = var.security_groups
+
+  default_version                      = var.default_version
+  update_default_version               = var.update_default_version
+ disable_api_termination = var.disable_api_termination
+ instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
+ kernel_id = var.kernel_id
+ ram_disk_id = var.ram_disk_id
+
+ dynamic "block_device_mappings" {
+ for_each = var.block_device_mappings
+ content {
+ device_name = block_device_mappings.value.device_name
+ no_device = lookup(block_device_mappings.value, "no_device", null)
+ virtual_name = lookup(block_device_mappings.value, "virtual_name", null)
+
+ dynamic "ebs" {
+ for_each = flatten([lookup(block_device_mappings.value, "ebs", [])])
+ content {
+ delete_on_termination = lookup(ebs.value, "delete_on_termination", null)
+ encrypted = lookup(ebs.value, "encrypted", null)
+ kms_key_id = lookup(ebs.value, "kms_key_id", null)
+ iops = lookup(ebs.value, "iops", null)
+ throughput = lookup(ebs.value, "throughput", null)
+ snapshot_id = lookup(ebs.value, "snapshot_id", null)
+ volume_size = lookup(ebs.value, "volume_size", null)
+ volume_type = lookup(ebs.value, "volume_type", null)
+ }
+ }
+ }
+ }
+
+ dynamic "capacity_reservation_specification" {
+ for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
+ content {
+ capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)
+
+ dynamic "capacity_reservation_target" {
+ for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", [])
+ content {
+ capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null)
+ }
+ }
+ }
+ }
+
+ dynamic "cpu_options" {
+ for_each = var.cpu_options != null ? [var.cpu_options] : []
+ content {
+ core_count = cpu_options.value.core_count
+ threads_per_core = cpu_options.value.threads_per_core
+ }
+ }
+
+ dynamic "credit_specification" {
+ for_each = var.credit_specification != null ? [var.credit_specification] : []
+ content {
+ cpu_credits = credit_specification.value.cpu_credits
+ }
+ }
+
+ dynamic "elastic_gpu_specifications" {
+ for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : []
+ content {
+ type = elastic_gpu_specifications.value.type
+ }
+ }
+
+ dynamic "elastic_inference_accelerator" {
+ for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : []
+ content {
+ type = elastic_inference_accelerator.value.type
+ }
+ }
+
+ dynamic "enclave_options" {
+ for_each = var.enclave_options != null ? [var.enclave_options] : []
+ content {
+ enabled = enclave_options.value.enabled
+ }
+ }
+
+ dynamic "hibernation_options" {
+ for_each = var.hibernation_options != null ? [var.hibernation_options] : []
+ content {
+ configured = hibernation_options.value.configured
+ }
+ }
+
+ dynamic "iam_instance_profile" {
+ for_each = var.iam_instance_profile_name != null || var.iam_instance_profile_arn != null ? [1] : []
+ content {
+ name = var.iam_instance_profile_name
+ arn = var.iam_instance_profile_arn
+ }
+ }
+
+ dynamic "instance_market_options" {
+ for_each = var.instance_market_options != null ? [var.instance_market_options] : []
+ content {
+ market_type = instance_market_options.value.market_type
+
+ dynamic "spot_options" {
+ for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : []
+ content {
+ block_duration_minutes = spot_options.value.block_duration_minutes
+ instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
+ max_price = lookup(spot_options.value, "max_price", null)
+ spot_instance_type = lookup(spot_options.value, "spot_instance_type", null)
+ valid_until = lookup(spot_options.value, "valid_until", null)
+ }
+ }
+ }
+ }
+
+ dynamic "license_specification" {
+ for_each = var.license_specifications != null ? [var.license_specifications] : []
+ content {
+ license_configuration_arn = license_specifications.value.license_configuration_arn
+ }
+ }
+
+ dynamic "metadata_options" {
+ for_each = var.metadata_options != null ? [var.metadata_options] : []
+ content {
+ http_endpoint = lookup(metadata_options.value, "http_endpoint", null)
+ http_tokens = lookup(metadata_options.value, "http_tokens", null)
+ http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null)
+ }
+ }
+
+ dynamic "monitoring" {
+ for_each = var.enable_monitoring != null ? [1] : []
+ content {
+ enabled = var.enable_monitoring
+ }
+ }
+
+ dynamic "network_interfaces" {
+ for_each = var.network_interfaces
+ content {
+ associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null)
+ associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null)
+ delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null)
+ description = lookup(network_interfaces.value, "description", null)
+ device_index = lookup(network_interfaces.value, "device_index", null)
+ ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : []
+ ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null)
+ ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : []
+ ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null)
+ network_interface_id = lookup(network_interfaces.value, "network_interface_id", null)
+ private_ip_address = lookup(network_interfaces.value, "private_ip_address", null)
+ security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? network_interfaces.value.security_groups : []
+ subnet_id = lookup(network_interfaces.value, "subnet_id", null)
+ }
+ }
+
+ dynamic "placement" {
+ for_each = var.placement != null ? [var.placement] : []
+ content {
+ affinity = lookup(placement.value, "affinity", null)
+ availability_zone = lookup(placement.value, "availability_zone", null)
+ group_name = lookup(placement.value, "group_name", null)
+ host_id = lookup(placement.value, "host_id", null)
+ spread_domain = lookup(placement.value, "spread_domain", null)
+ tenancy = lookup(placement.value, "tenancy", null)
+ partition_number = lookup(placement.value, "partition_number", null)
+ }
+ }
+
+ dynamic "tag_specifications" {
+ for_each = var.tag_specifications
+ content {
+ resource_type = tag_specifications.value.resource_type
+ tags = tag_specifications.value.tags
+ }
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ tags = var.tags_as_map
+}
+
+################################################################################
+# Autoscaling group
+################################################################################
+
+resource "aws_autoscaling_group" "this" {
+ count = var.create_asg ? 1 : 0
+
+ name = var.use_name_prefix ? null : var.name
+ name_prefix = var.use_name_prefix ? "${var.name}-" : null
+
+ launch_template {
+    name    = local.launch_template_name
+ version = local.launch_template_version
+ }
+
+ availability_zones = var.availability_zones
+ vpc_zone_identifier = var.subnet_ids
+
+ min_size = var.min_size
+ max_size = var.max_size
+ desired_capacity = var.desired_capacity
+ capacity_rebalance = var.capacity_rebalance
+ min_elb_capacity = var.min_elb_capacity
+ wait_for_elb_capacity = var.wait_for_elb_capacity
+ wait_for_capacity_timeout = var.wait_for_capacity_timeout
+  default_cooldown          = var.default_cooldown
+ protect_from_scale_in = var.protect_from_scale_in
+
+ load_balancers = var.load_balancers
+ target_group_arns = var.target_group_arns
+ placement_group = var.placement_group
+ health_check_type = var.health_check_type
+ health_check_grace_period = var.health_check_grace_period
+
+ force_delete = var.force_delete
+ termination_policies = var.termination_policies
+ suspended_processes = var.suspended_processes
+ max_instance_lifetime = var.max_instance_lifetime
+
+ enabled_metrics = var.enabled_metrics
+ metrics_granularity = var.metrics_granularity
+ service_linked_role_arn = var.service_linked_role_arn
+
+ dynamic "initial_lifecycle_hook" {
+ for_each = var.initial_lifecycle_hooks
+ content {
+ name = initial_lifecycle_hook.value.name
+      default_result          = lookup(initial_lifecycle_hook.value, "default_result", null)
+ heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
+ lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition
+ notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
+ notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
+ role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null)
+ }
+ }
+
+ dynamic "instance_refresh" {
+ for_each = var.instance_refresh != null ? [var.instance_refresh] : []
+ content {
+ strategy = instance_refresh.value.strategy
+ triggers = lookup(instance_refresh.value, "triggers", null)
+
+ dynamic "preferences" {
+ for_each = lookup(instance_refresh.value, "preferences", null) != null ? [instance_refresh.value.preferences] : []
+ content {
+ instance_warmup = lookup(preferences.value, "instance_warmup", null)
+ min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
+ }
+ }
+ }
+ }
+
+ dynamic "mixed_instances_policy" {
+ for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : []
+ content {
+ dynamic "instances_distribution" {
+ for_each = lookup(mixed_instances_policy.value, "instances_distribution", null) != null ? [mixed_instances_policy.value.instances_distribution] : []
+ content {
+ on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null)
+ on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null)
+ on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null)
+ spot_allocation_strategy = lookup(instances_distribution.value, "spot_allocation_strategy", null)
+ spot_instance_pools = lookup(instances_distribution.value, "spot_instance_pools", null)
+ spot_max_price = lookup(instances_distribution.value, "spot_max_price", null)
+ }
+ }
+
+ launch_template {
+ launch_template_specification {
+        launch_template_name = local.launch_template_name
+ version = local.launch_template_version
+ }
+
+ dynamic "override" {
+ for_each = lookup(mixed_instances_policy.value, "override", null) != null ? mixed_instances_policy.value.override : []
+ content {
+ instance_type = lookup(override.value, "instance_type", null)
+ weighted_capacity = lookup(override.value, "weighted_capacity", null)
+
+ dynamic "launch_template_specification" {
+ for_each = lookup(override.value, "launch_template_specification", null) != null ? override.value.launch_template_specification : []
+ content {
+ launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ dynamic "warm_pool" {
+ for_each = var.warm_pool != null ? [var.warm_pool] : []
+ content {
+ pool_state = lookup(warm_pool.value, "pool_state", null)
+ min_size = lookup(warm_pool.value, "min_size", null)
+ max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
+ }
+ }
+
+ timeouts {
+ delete = var.delete_timeout
+ }
+
+ tags = local.tags
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+################################################################################
+# Autoscaling group schedule
+################################################################################
+resource "aws_autoscaling_schedule" "this" {
+ for_each = var.create_asg && var.create_schedule ? var.schedules : {}
+
+ scheduled_action_name = each.key
+ autoscaling_group_name = aws_autoscaling_group.this[0].name
+
+ min_size = lookup(each.value, "min_size", null)
+ max_size = lookup(each.value, "max_size", null)
+ desired_capacity = lookup(each.value, "desired_capacity", null)
+ start_time = lookup(each.value, "start_time", null)
+ end_time = lookup(each.value, "end_time", null)
+ time_zone = lookup(each.value, "time_zone", null)
+
+ # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]
+ # Cron examples: https://crontab.guru/examples.html
+ recurrence = lookup(each.value, "recurrence", null)
+}
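+
+# Illustrative `schedules` input (map keys become scheduled action names;
+# all values shown are placeholders):
+#
+#   schedules = {
+#     scale-up-weekday-mornings = {
+#       min_size         = 1
+#       max_size         = 3
+#       desired_capacity = 2
+#       recurrence       = "0 7 * * MON-FRI"
+#     }
+#   }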
diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..876a114034
--- /dev/null
+++ b/modules/self-managed-node-group/outputs.tf
@@ -0,0 +1,96 @@
+################################################################################
+# Launch template
+################################################################################
+
+output "launch_template_id" {
+ description = "The ID of the launch template"
+ value = try(aws_launch_template.this[0].id, "")
+}
+
+output "launch_template_arn" {
+ description = "The ARN of the launch template"
+ value = try(aws_launch_template.this[0].arn, "")
+}
+
+output "launch_template_latest_version" {
+ description = "The latest version of the launch template"
+ value = try(aws_launch_template.this[0].latest_version, "")
+}
+
+################################################################################
+# Autoscaling group
+################################################################################
+
+output "autoscaling_group_id" {
+ description = "The autoscaling group id"
+ value = try(aws_autoscaling_group.this[0].id, "")
+}
+
+output "autoscaling_group_name" {
+ description = "The autoscaling group name"
+ value = try(aws_autoscaling_group.this[0].name, "")
+}
+
+output "autoscaling_group_arn" {
+ description = "The ARN for this AutoScaling Group"
+ value = try(aws_autoscaling_group.this[0].arn, "")
+}
+
+output "autoscaling_group_min_size" {
+ description = "The minimum size of the autoscale group"
+ value = try(aws_autoscaling_group.this[0].min_size, "")
+}
+
+output "autoscaling_group_max_size" {
+ description = "The maximum size of the autoscale group"
+ value = try(aws_autoscaling_group.this[0].max_size, "")
+}
+
+output "autoscaling_group_desired_capacity" {
+ description = "The number of Amazon EC2 instances that should be running in the group"
+ value = try(aws_autoscaling_group.this[0].desired_capacity, "")
+}
+
+output "autoscaling_group_default_cooldown" {
+ description = "Time between a scaling activity and the succeeding scaling activity"
+ value = try(aws_autoscaling_group.this[0].default_cooldown, "")
+}
+
+output "autoscaling_group_health_check_grace_period" {
+ description = "Time after instance comes into service before checking health"
+ value = try(aws_autoscaling_group.this[0].health_check_grace_period, "")
+}
+
+output "autoscaling_group_health_check_type" {
+ description = "EC2 or ELB. Controls how health checking is done"
+ value = try(aws_autoscaling_group.this[0].health_check_type, "")
+}
+
+output "autoscaling_group_availability_zones" {
+ description = "The availability zones of the autoscale group"
+ value = try(aws_autoscaling_group.this[0].availability_zones, "")
+}
+
+output "autoscaling_group_vpc_zone_identifier" {
+ description = "The VPC zone identifier"
+ value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "")
+}
+
+output "autoscaling_group_load_balancers" {
+ description = "The load balancer names associated with the autoscaling group"
+ value = try(aws_autoscaling_group.this[0].load_balancers, "")
+}
+
+output "autoscaling_group_target_group_arns" {
+ description = "List of Target Group ARNs that apply to this AutoScaling Group"
+ value = try(aws_autoscaling_group.this[0].target_group_arns, "")
+}
+
+################################################################################
+# Autoscaling group schedule
+################################################################################
+
+output "autoscaling_schedule_arns" {
+ description = "ARNs of autoscaling group schedules"
+ value = { for k, v in aws_autoscaling_schedule.this : k => v.arn }
+}
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
new file mode 100644
index 0000000000..5cf4db92f5
--- /dev/null
+++ b/modules/self-managed-node-group/variables.tf
@@ -0,0 +1,449 @@
+################################################################################
+# Autoscaling group
+################################################################################
+
+variable "create" {
+ description = "Determines whether to create autoscaling group or not"
+ type = bool
+ default = true
+}
+
+variable "name" {
+ description = "Name used across the resources created"
+ type = string
+}
+
+variable "use_name_prefix" {
+ description = "Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix"
+ type = bool
+ default = true
+}
+
+variable "launch_template" {
+ description = "Name of an existing launch template to be used (created outside of this module)"
+ type = string
+ default = null
+}
+
+variable "launch_template_version" {
+ description = "Launch template version. Can be version number, `$Latest`, or `$Default`"
+ type = string
+ default = null
+}
+
+variable "availability_zones" {
+ description = "A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids`"
+ type = list(string)
+ default = null
+}
+
+variable "subnet_ids" {
+ description = "A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`"
+ type = list(string)
+ default = null
+}
+
+variable "min_size" {
+ description = "The minimum size of the autoscaling group"
+ type = number
+ default = null
+}
+
+variable "max_size" {
+ description = "The maximum size of the autoscaling group"
+ type = number
+ default = null
+}
+
+variable "desired_capacity" {
+ description = "The number of Amazon EC2 instances that should be running in the autoscaling group"
+ type = number
+ default = null
+}
+
+variable "capacity_rebalance" {
+ description = "Indicates whether capacity rebalance is enabled"
+ type = bool
+ default = null
+}
+
+variable "min_elb_capacity" {
+ description = "Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. Updates will not wait on ELB instance number changes"
+ type = number
+ default = null
+}
+
+variable "wait_for_elb_capacity" {
+ description = "Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior."
+ type = number
+ default = null
+}
+
+variable "wait_for_capacity_timeout" {
+ description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior."
+ type = string
+ default = null
+}
+
+variable "defaulaunch_template_cooldown" {
+ description = "The amount of time, in seconds, after a scaling activity completes before another scaling activity can start"
+ type = number
+ default = null
+}
+
+variable "protect_from_scale_in" {
+ description = "Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events."
+ type = bool
+ default = false
+}
+
+variable "load_balancers" {
+ description = "A list of elastic load balancer names to add to the autoscaling group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead"
+ type = list(string)
+ default = []
+}
+
+variable "target_group_arns" {
+ description = "A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing"
+ type = list(string)
+ default = []
+}
+
+variable "placement_group" {
+ description = "The name of the placement group into which you'll launch your instances, if any"
+ type = string
+ default = null
+}
+
+variable "health_check_type" {
+ description = "`EC2` or `ELB`. Controls how health checking is done"
+ type = string
+ default = null
+}
+
+variable "health_check_grace_period" {
+ description = "Time (in seconds) after instance comes into service before checking health"
+ type = number
+ default = null
+}
+
+variable "force_delete" {
+ description = "Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling"
+ type = bool
+ default = null
+}
+
+variable "termination_policies" {
+ description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`"
+ type = list(string)
+ default = null
+}
+
+variable "suspended_processes" {
+ description = "A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly"
+ type = list(string)
+ default = null
+}
+
+variable "max_instance_lifetime" {
+ description = "The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds"
+ type = number
+ default = null
+}
+
+variable "enabled_metrics" {
+ description = "A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`"
+ type = list(string)
+ default = null
+}
+
+variable "metrics_granularity" {
+ description = "The granularity to associate with the metrics to collect. The only valid value is `1Minute`"
+ type = string
+ default = null
+}
+
+variable "service_linked_role_arn" {
+ description = "The ARN of the service-linked role that the ASG will use to call other AWS services"
+ type = string
+ default = null
+}
+
+variable "initial_lifecycle_hooks" {
+ description = "One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource"
+ type = list(map(string))
+ default = []
+}
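+
+# Illustrative `initial_lifecycle_hooks` value (name and timings are placeholders):
+#
+#   initial_lifecycle_hooks = [
+#     {
+#       name                 = "StartupLifecycleHook"
+#       lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING"
+#       default_result       = "CONTINUE"
+#       heartbeat_timeout    = 60
+#     },
+#   ]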
+
+variable "instance_refresh" {
+ description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated"
+ type = any
+ default = null
+}
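+
+# Illustrative `instance_refresh` value:
+#
+#   instance_refresh = {
+#     strategy = "Rolling"
+#     preferences = {
+#       min_healthy_percentage = 66
+#     }
+#   }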
+
+variable "use_mixed_instances_policy" {
+ description = "Determines whether to use a mixed instances policy in the autoscaling group or not"
+ type = bool
+ default = false
+}
+
+variable "mixed_instances_policy" {
+ description = "Configuration block containing settings to define launch targets for Auto Scaling groups"
+ type = any
+ default = null
+}
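+
+# Illustrative `mixed_instances_policy` value (used when `use_mixed_instances_policy = true`):
+#
+#   mixed_instances_policy = {
+#     instances_distribution = {
+#       on_demand_base_capacity                  = 0
+#       on_demand_percentage_above_base_capacity = 25
+#       spot_allocation_strategy                 = "capacity-optimized"
+#     }
+#     override = [
+#       { instance_type = "m5.large", weighted_capacity = "1" },
+#       { instance_type = "m5.xlarge", weighted_capacity = "2" },
+#     ]
+#   }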
+
+variable "delete_timeout" {
+ description = "Delete timeout to wait for destroying autoscaling group"
+ type = string
+ default = null
+}
+
+variable "tags" {
+ description = "A list of tag blocks. Each element should have keys named key, value, and propagate_at_launch"
+ type = list(map(string))
+ default = []
+}
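+
+# Illustrative `tags` value:
+#
+#   tags = [
+#     {
+#       key                 = "Environment"
+#       value               = "test"
+#       propagate_at_launch = true
+#     },
+#   ]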
+
+variable "tags_as_map" {
+ description = "A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws_autoscaling_group requires."
+ type = map(string)
+ default = {}
+}
+
+variable "propagate_name" {
+ description = "Determines whether to propagate the ASG Name tag or not"
+ type = bool
+ default = true
+}
+
+variable "warm_pool" {
+ description = "If this block is configured, add a Warm Pool to the specified Auto Scaling group"
+ type = any
+ default = null
+}
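+
+# Illustrative `warm_pool` value:
+#
+#   warm_pool = {
+#     pool_state                  = "Stopped"
+#     min_size                    = 0
+#     max_group_prepared_capacity = 2
+#   }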
+
+################################################################################
+# Launch template
+################################################################################
+
+variable "create_lt" {
+ description = "Determines whether to create launch template or not"
+ type = bool
+ default = false
+}
+
+variable "use_lt" {
+ description = "Determines whether to use a launch template in the autoscaling group or not"
+ type = bool
+ default = false
+}
+
+variable "launch_template_name" {
+ description = "Name of launch template to be created"
+ type = string
+ default = ""
+}
+
+variable "launch_template_use_name_prefix" {
+ description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
+ type = bool
+ default = true
+}
+
+variable "description" {
+ description = "(LT) Description of the launch template"
+ type = string
+ default = null
+}
+
+variable "defaulaunch_template_version" {
+ description = "(LT) Default Version of the launch template"
+ type = string
+ default = null
+}
+
+variable "update_defaulaunch_template_version" {
+ description = "(LT) Whether to update Default Version each update. Conflicts with `defaulaunch_template_version`"
+ type = string
+ default = null
+}
+
+variable "disable_api_termination" {
+ description = "(LT) If true, enables EC2 instance termination protection"
+ type = bool
+ default = null
+}
+
+variable "instance_initiated_shutdown_behavior" {
+ description = "(LT) Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
+ type = string
+ default = null
+}
+
+variable "kernel_id" {
+ description = "(LT) The kernel ID"
+ type = string
+ default = null
+}
+
+variable "ram_disk_id" {
+ description = "(LT) The ID of the ram disk"
+ type = string
+ default = null
+}
+
+variable "block_device_mappings" {
+ description = "(LT) Specify volumes to attach to the instance besides the volumes specified by the AMI"
+ type = list(any)
+ default = []
+}
+
+variable "capacity_reservation_specification" {
+ description = "(LT) Targeting for EC2 capacity reservations"
+ type = any
+ default = null
+}
+
+variable "cpu_options" {
+ description = "(LT) The CPU options for the instance"
+ type = map(string)
+ default = null
+}
+
+variable "credit_specification" {
+ description = "(LT) Customize the credit specification of the instance"
+ type = map(string)
+ default = null
+}
+
+variable "elastic_gpu_specifications" {
+ description = "(LT) The elastic GPU to attach to the instance"
+ type = map(string)
+ default = null
+}
+
+variable "elastic_inference_accelerator" {
+ description = "(LT) Configuration block containing an Elastic Inference Accelerator to attach to the instance"
+ type = map(string)
+ default = null
+}
+
+variable "enclave_options" {
+ description = "(LT) Enable Nitro Enclaves on launched instances"
+ type = map(string)
+ default = null
+}
+
+variable "hibernation_options" {
+ description = "(LT) The hibernation options for the instance"
+ type = map(string)
+ default = null
+}
+
+variable "iam_instance_profile_arn" {
+ description = "(LT) The IAM Instance Profile ARN to launch the instance with"
+ type = string
+ default = null
+}
+
+variable "instance_market_options" {
+ description = "(LT) The market (purchasing) option for the instance"
+ type = any
+ default = null
+}
+
+variable "license_specifications" {
+ description = "(LT) A list of license specifications to associate with"
+ type = map(string)
+ default = null
+}
+
+variable "network_interfaces" {
+ description = "(LT) Customize network interfaces to be attached at instance boot time"
+ type = list(any)
+ default = []
+}
+
+variable "placement" {
+ description = "(LT) The placement of the instance"
+ type = map(string)
+ default = null
+}
+
+variable "tag_specifications" {
+ description = "(LT) The tags to apply to the resources during launch"
+ type = list(any)
+ default = []
+}
+
+variable "ebs_optimized" {
+ description = "If true, the launched EC2 instance will be EBS-optimized"
+ type = bool
+ default = null
+}
+
+variable "iam_instance_profile_name" {
+ description = "The name attribute of the IAM instance profile to associate with launched instances"
+ type = string
+ default = null
+}
+
+variable "image_id" {
+ description = "The AMI from which to launch the instance"
+ type = string
+ default = ""
+}
+
+variable "instance_type" {
+ description = "The type of the instance to launch"
+ type = string
+ default = ""
+}
+
+variable "key_name" {
+ description = "The key name that should be used for the instance"
+ type = string
+ default = null
+}
+
+variable "user_data_base64" {
+ description = "The Base64-encoded user data to provide when launching the instance. You should use this for Launch Templates instead user_data"
+ type = string
+ default = null
+}
+
+variable "security_groups" {
+ description = "A list of security group IDs to associate"
+ type = list(string)
+ default = null
+}
+
+variable "enable_monitoring" {
+ description = "Enables/disables detailed monitoring"
+ type = bool
+ default = null
+}
+
+variable "metadata_options" {
+ description = "Customize the metadata options for the instance"
+ type = map(string)
+ default = null
+}
+
+################################################################################
+# Autoscaling group schedule
+################################################################################
+
+variable "create_schedule" {
+ description = "Determines whether to create autoscaling group schedule or not"
+ type = bool
+ default = true
+}
+
+variable "schedules" {
+ description = "Map of autoscaling group schedule to create"
+ type = map(any)
+ default = {}
+}
diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf
new file mode 100644
index 0000000000..5bc71fb16b
--- /dev/null
+++ b/modules/self-managed-node-group/versions.tf
@@ -0,0 +1,15 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.53"
+ }
+
+ null = {
+ source = "hashicorp/null"
+ version = ">= 2.0"
+ }
+ }
+}
From 04f749d379309a430712990ef627dcba019709ce Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 9 Nov 2021 18:14:18 -0500
Subject: [PATCH 12/83] chore: testing with self managed node group first
---
README.md | 27 +-
.../README.md | 2 -
.../disk_encryption_policy.tf | 0
.../launchtemplate.tf | 0
.../main.tf | 10 +-
.../outputs.tf | 0
.../templates/userdata.sh.tpl | 0
.../variables.tf | 0
.../versions.tf | 0
examples/launch_templates/README.md | 63 --
examples/launch_templates/main.tf | 149 -----
examples/launch_templates/outputs.tf | 9 -
examples/launch_templates/pre_userdata.sh | 1 -
examples/launch_templates/versions.tf | 11 -
examples/managed_node_groups/variables.tf | 0
examples/managed_node_groups/versions.tf | 10 -
.../README.md | 9 +-
.../main.tf | 112 ++--
.../outputs.tf | 0
.../variables.tf | 0
examples/self_managed_node_groups/versions.tf | 10 +
locals.tf | 19 +-
main.tf | 8 +-
modules/self-managed-node-group/README.md | 113 +++-
modules/self-managed-node-group/main.tf | 100 ++-
modules/self-managed-node-group/variables.tf | 101 ++-
modules/self-managed-node-group/versions.tf | 5 -
variables.tf | 153 +----
workers.tf | 628 ++++--------------
29 files changed, 411 insertions(+), 1129 deletions(-)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/README.md (93%)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/disk_encryption_policy.tf (100%)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/launchtemplate.tf (100%)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/main.tf (95%)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/outputs.tf (100%)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/templates/userdata.sh.tpl (100%)
rename examples/{launch_templates => eks_managed_node_group}/variables.tf (100%)
rename examples/{launch_templates_with_managed_node_groups => eks_managed_node_group}/versions.tf (100%)
delete mode 100644 examples/launch_templates/README.md
delete mode 100644 examples/launch_templates/main.tf
delete mode 100644 examples/launch_templates/outputs.tf
delete mode 100644 examples/launch_templates/pre_userdata.sh
delete mode 100644 examples/launch_templates/versions.tf
delete mode 100644 examples/managed_node_groups/variables.tf
delete mode 100644 examples/managed_node_groups/versions.tf
rename examples/{managed_node_groups => self_managed_node_groups}/README.md (79%)
rename examples/{managed_node_groups => self_managed_node_groups}/main.tf (62%)
rename examples/{managed_node_groups => self_managed_node_groups}/outputs.tf (100%)
rename examples/{launch_templates_with_managed_node_groups => self_managed_node_groups}/variables.tf (100%)
create mode 100644 examples/self_managed_node_groups/versions.tf
diff --git a/README.md b/README.md
index 6ecfe87110..8b716166f3 100644
--- a/README.md
+++ b/README.md
@@ -132,12 +132,12 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Source | Version |
|------|--------|---------|
| [fargate](#module\_fargate) | ./modules/fargate | n/a |
+| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
## Resources
| Name | Type |
|------|------|
-| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
| [aws_iam_instance_profile.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
@@ -145,7 +145,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
@@ -204,7 +203,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `false` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
| [create\_worker\_iam\_role](#input\_create\_worker\_iam\_role) | Determines whether a worker IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows` | `string` | `"linux"` | no |
+| [create\_worker\_security\_group](#input\_create\_worker\_security\_group) | Whether to create a security group for the worker nodes | `bool` | `true` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_iam\_role\_path](#input\_fargate\_iam\_role\_path) | Fargate IAM role path | `string` | `null` | no |
| [fargate\_iam\_role\_permissions\_boundary](#input\_fargate\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the Fargate role | `string` | `null` | no |
@@ -212,39 +211,25 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in Fargate submodule's README.md for more details | `any` | `{}` | no |
| [fargate\_subnet\_ids](#input\_fargate\_subnet\_ids) | A list of subnet IDs to place Fargate workers within (if different from `subnet_ids`) | `list(string)` | `[]` | no |
| [fargate\_tags](#input\_fargate\_tags) | A map of additional tags to add to the Fargate resources created | `map(string)` | `{}` | no |
-| [group\_default\_settings](#input\_group\_default\_settings) | Override default values for autoscaling group, node group settings | `any` | `{}` | no |
-| [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path | `string` | `"/"` | no |
-| [kubeconfig\_api\_version](#input\_kubeconfig\_api\_version) | KubeConfig API version. Defaults to client.authentication.k8s.io/v1alpha1 | `string` | `"client.authentication.k8s.io/v1alpha1"` | no |
-| [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"] | `list(string)` | `[]` | no |
-| [kubeconfig\_aws\_authenticator\_command](#input\_kubeconfig\_aws\_authenticator\_command) | Command to use to fetch AWS EKS credentials | `string` | `"aws-iam-authenticator"` | no |
-| [kubeconfig\_aws\_authenticator\_command\_args](#input\_kubeconfig\_aws\_authenticator\_command\_args) | Default arguments passed to the authenticator command. Defaults to [token -i $cluster\_name] | `list(string)` | `[]` | no |
-| [kubeconfig\_aws\_authenticator\_env\_variables](#input\_kubeconfig\_aws\_authenticator\_env\_variables) | Environment variables that should be used when executing the authenticator. e.g. { AWS\_PROFILE = "eks"} | `map(string)` | `{}` | no |
-| [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig | `string` | `""` | no |
-| [launch\_templates](#input\_launch\_templates) | Map of launch template definitions to create | `map(any)` | `{}` | no |
-| [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers | `bool` | `true` | no |
-| [node\_groups](#input\_node\_groups) | Map of map of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
-| [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
-| [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached | `string` | `null` | no |
+| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
| [worker\_additional\_policies](#input\_worker\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
-| [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
-| [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
-| [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id` | `bool` | `true` | no |
| [worker\_egress\_cidrs](#input\_worker\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
-| [worker\_groups](#input\_worker\_groups) | A map of maps defining worker group configurations to be defined using AWS Launch Template | `any` | `{}` | no |
| [worker\_iam\_role\_name](#input\_worker\_iam\_role\_name) | Name to use on worker role created | `string` | `null` | no |
| [worker\_iam\_role\_path](#input\_worker\_iam\_role\_path) | Worker IAM role path | `string` | `null` | no |
| [worker\_iam\_role\_permissions\_boundary](#input\_worker\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the worker role | `string` | `null` | no |
| [worker\_iam\_role\_tags](#input\_worker\_iam\_role\_tags) | A map of additional tags to add to the worker IAM role created | `map(string)` | `{}` | no |
| [worker\_iam\_role\_use\_name\_prefix](#input\_worker\_iam\_role\_use\_name\_prefix) | Determines whether worker IAM role name (`worker_iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [worker\_role\_name](#input\_worker\_role\_name) | User defined workers role name | `string` | `""` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
+| [worker\_security\_group\_name](#input\_worker\_security\_group\_name) | Name to use on worker security group created | `string` | `null` | no |
+| [worker\_security\_group\_tags](#input\_worker\_security\_group\_tags) | A map of additional tags to add to the worker security group created | `map(string)` | `{}` | no |
+| [worker\_security\_group\_use\_name\_prefix](#input\_worker\_security\_group\_use\_name\_prefix) | Determines whether the worker security group name (`worker_security_group_name`) is used as a prefix | `string` | `true` | no |
| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443) | `number` | `1025` | no |
## Outputs
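
For orientation while this refactor is in flight, a hypothetical sketch of how the root module's new `self_managed_node_groups` input is expected to be wired up; the per-group keys shown are assumptions drawn from the submodule's variables, not a finalized schema:

```hcl
module "eks" {
  source = "../.."

  cluster_name    = "example"
  cluster_version = "1.20"

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  self_managed_node_groups = {
    # hypothetical group; keys assumed to map onto the
    # self-managed-node-group submodule inputs
    default = {
      min_size         = 1
      max_size         = 3
      desired_capacity = 1
      instance_type    = "t3.medium"
    }
  }

  tags = {
    Environment = "dev"
  }
}
```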
diff --git a/examples/launch_templates_with_managed_node_groups/README.md b/examples/eks_managed_node_group/README.md
similarity index 93%
rename from examples/launch_templates_with_managed_node_groups/README.md
rename to examples/eks_managed_node_group/README.md
index b877b58a9d..bc1a155e65 100644
--- a/examples/launch_templates_with_managed_node_groups/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -35,7 +35,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
## Modules
@@ -50,7 +49,6 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_iam_service_linked_role.autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource |
| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
diff --git a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf b/examples/eks_managed_node_group/disk_encryption_policy.tf
similarity index 100%
rename from examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf
rename to examples/eks_managed_node_group/disk_encryption_policy.tf
diff --git a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf b/examples/eks_managed_node_group/launchtemplate.tf
similarity index 100%
rename from examples/launch_templates_with_managed_node_groups/launchtemplate.tf
rename to examples/eks_managed_node_group/launchtemplate.tf
diff --git a/examples/launch_templates_with_managed_node_groups/main.tf b/examples/eks_managed_node_group/main.tf
similarity index 95%
rename from examples/launch_templates_with_managed_node_groups/main.tf
rename to examples/eks_managed_node_group/main.tf
index 227c034fcb..ca2bb6ffe4 100644
--- a/examples/launch_templates_with_managed_node_groups/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -3,7 +3,7 @@ provider "aws" {
}
locals {
- name = "lt_with_mng-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
}
@@ -110,13 +110,7 @@ provider "kubernetes" {
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
diff --git a/examples/launch_templates_with_managed_node_groups/outputs.tf b/examples/eks_managed_node_group/outputs.tf
similarity index 100%
rename from examples/launch_templates_with_managed_node_groups/outputs.tf
rename to examples/eks_managed_node_group/outputs.tf
diff --git a/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl b/examples/eks_managed_node_group/templates/userdata.sh.tpl
similarity index 100%
rename from examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl
rename to examples/eks_managed_node_group/templates/userdata.sh.tpl
diff --git a/examples/launch_templates/variables.tf b/examples/eks_managed_node_group/variables.tf
similarity index 100%
rename from examples/launch_templates/variables.tf
rename to examples/eks_managed_node_group/variables.tf
diff --git a/examples/launch_templates_with_managed_node_groups/versions.tf b/examples/eks_managed_node_group/versions.tf
similarity index 100%
rename from examples/launch_templates_with_managed_node_groups/versions.tf
rename to examples/eks_managed_node_group/versions.tf
diff --git a/examples/launch_templates/README.md b/examples/launch_templates/README.md
deleted file mode 100644
index 90650cd89e..0000000000
--- a/examples/launch_templates/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Launch templates example
-
-This is EKS example using workers launch template with worker groups feature.
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-
diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
deleted file mode 100644
index 4a1412341f..0000000000
--- a/examples/launch_templates/main.tf
+++ /dev/null
@@ -1,149 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "launch_template-${random_string.suffix.result}"
- cluster_version = "1.20"
- region = "eu-west-1"
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- launch_templates = {
- lt_default = {}
- lt_two = {
- instance_type = "t3.small"
- }
- }
-
- group_default_settings = {
- launch_template_key = "tl_default"
- instance_type = "t3.medium"
- }
-
- worker_groups = {
- # one = {
- # name = "worker-group-1"
- # asg_desired_capacity = 2
- # public_ip = true
- # tags = {
- # ExtraTag = "TagValue"
- # }
- # propogated_tags = [{
- # key = "ExtraPropgatedTag"
- # value = "PropogatedTagValue"
- # propagate_at_launch = false
- # }]
- # }
- # two = {
- # name = "worker-group-2"
- # launch_template_key = "lt_two"
- # asg_desired_capacity = 1
- # public_ip = true
- # ebs_optimized = true
- # }
- # three = {
- # name = "worker-group-3"
- # instance_type = "t2.large"
- # asg_desired_capacity = 1
- # public_ip = true
- # elastic_inference_accelerator = "eia2.medium"
- # }
- # four = {
- # name = "worker-group-4"
- # instance_type = "t3.small"
- # asg_desired_capacity = 1
- # public_ip = true
- # root_volume_size = 150
- # root_volume_type = "gp3"
- # root_volume_throughput = 300
- # additional_ebs_volumes = [
- # {
- # block_device_name = "/dev/xvdb"
- # volume_size = 100
- # volume_type = "gp3"
- # throughput = 150
- # },
- # ]
- # },
- }
-
- tags = local.tags
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = "1"
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = "1"
- }
-
- tags = local.tags
-}
diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf
deleted file mode 100644
index 256cb0b74e..0000000000
--- a/examples/launch_templates/outputs.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
- value = module.eks.cluster_security_group_id
-}
diff --git a/examples/launch_templates/pre_userdata.sh b/examples/launch_templates/pre_userdata.sh
deleted file mode 100644
index 4cbf0d114b..0000000000
--- a/examples/launch_templates/pre_userdata.sh
+++ /dev/null
@@ -1 +0,0 @@
-yum update -y
diff --git a/examples/launch_templates/versions.tf b/examples/launch_templates/versions.tf
deleted file mode 100644
index 9c1dbfa3e8..0000000000
--- a/examples/launch_templates/versions.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
- }
-}
-
diff --git a/examples/managed_node_groups/variables.tf b/examples/managed_node_groups/variables.tf
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/examples/managed_node_groups/versions.tf b/examples/managed_node_groups/versions.tf
deleted file mode 100644
index bbcf893252..0000000000
--- a/examples/managed_node_groups/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
- required_version = ">= 0.13.1"
-
- required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
- }
-}
diff --git a/examples/managed_node_groups/README.md b/examples/self_managed_node_groups/README.md
similarity index 79%
rename from examples/managed_node_groups/README.md
rename to examples/self_managed_node_groups/README.md
index fda54c8aa0..5d76a53321 100644
--- a/examples/managed_node_groups/README.md
+++ b/examples/self_managed_node_groups/README.md
@@ -25,17 +25,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.56.0 |
## Modules
@@ -48,7 +44,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
diff --git a/examples/managed_node_groups/main.tf b/examples/self_managed_node_groups/main.tf
similarity index 62%
rename from examples/managed_node_groups/main.tf
rename to examples/self_managed_node_groups/main.tf
index 3129207a44..1b2fbf6a55 100644
--- a/examples/managed_node_groups/main.tf
+++ b/examples/self_managed_node_groups/main.tf
@@ -3,7 +3,7 @@ provider "aws" {
}
locals {
- name = "managed_node_groups-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
}
@@ -24,57 +24,57 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- node_groups_defaults = {
- ami_type = "AL2_x86_64"
- disk_size = 50
- }
-
- node_groups = {
- example = {
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- instance_types = ["t3.large"]
- capacity_type = "SPOT"
- k8s_labels = {
- Example = "managed_node_groups"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example"
- }
- taints = [
- {
- key = "dedicated"
- value = "gpuGroup"
- effect = "NO_SCHEDULE"
- }
- ]
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
- example2 = {
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- instance_types = ["t3.medium"]
- k8s_labels = {
- Example = "managed_node_groups"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example2"
- }
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
- }
+ # node_group_defaults = {
+ # ami_type = "AL2_x86_64"
+ # disk_size = 50
+ # }
+
+ # self_managed_node_groups = {
+ # example1 = {
+ # desired_capacity = 1
+ # max_capacity = 10
+ # min_capacity = 1
+
+ # instance_types = ["t3.large"]
+ # capacity_type = "SPOT"
+ # k8s_labels = {
+ # Example = "managed_node_groups"
+ # GithubRepo = "terraform-aws-eks"
+ # GithubOrg = "terraform-aws-modules"
+ # }
+ # additional_tags = {
+ # ExtraTag = "example"
+ # }
+ # taints = [
+ # {
+ # key = "dedicated"
+ # value = "gpuGroup"
+ # effect = "NO_SCHEDULE"
+ # }
+ # ]
+ # update_config = {
+ # max_unavailable_percentage = 50 # or set `max_unavailable`
+ # }
+ # }
+ # example2 = {
+ # desired_capacity = 1
+ # max_capacity = 10
+ # min_capacity = 1
+
+ # instance_types = ["t3.medium"]
+ # k8s_labels = {
+ # Example = "managed_node_groups"
+ # GithubRepo = "terraform-aws-eks"
+ # GithubOrg = "terraform-aws-modules"
+ # }
+ # additional_tags = {
+ # ExtraTag = "example2"
+ # }
+ # update_config = {
+ # max_unavailable_percentage = 50 # or set `max_unavailable`
+ # }
+ # }
+ # }
tags = {
Example = local.name
@@ -105,13 +105,7 @@ provider "kubernetes" {
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
diff --git a/examples/managed_node_groups/outputs.tf b/examples/self_managed_node_groups/outputs.tf
similarity index 100%
rename from examples/managed_node_groups/outputs.tf
rename to examples/self_managed_node_groups/outputs.tf
diff --git a/examples/launch_templates_with_managed_node_groups/variables.tf b/examples/self_managed_node_groups/variables.tf
similarity index 100%
rename from examples/launch_templates_with_managed_node_groups/variables.tf
rename to examples/self_managed_node_groups/variables.tf
diff --git a/examples/self_managed_node_groups/versions.tf b/examples/self_managed_node_groups/versions.tf
new file mode 100644
index 0000000000..97955e9bc8
--- /dev/null
+++ b/examples/self_managed_node_groups/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ }
+}
diff --git a/locals.tf b/locals.tf
index d0fcac425e..f8a466b2a3 100644
--- a/locals.tf
+++ b/locals.tf
@@ -10,23 +10,8 @@ locals {
cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
# Worker groups
- worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.worker.*.id) : var.worker_security_group_id
- worker_groups_platforms = [for x in var.worker_groups : try(x.platform, var.default_platform)]
-
- ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
- sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
- policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
-
- kubeconfig = var.create ? templatefile("${path.module}/templates/kubeconfig.tpl", {
- kubeconfig_name = coalesce(var.kubeconfig_name, "eks_${var.cluster_name}")
- endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
- aws_authenticator_kubeconfig_apiversion = var.kubeconfig_api_version
- aws_authenticator_command = var.kubeconfig_aws_authenticator_command
- aws_authenticator_command_args = coalescelist(var.kubeconfig_aws_authenticator_command_args, ["token", "-i", var.cluster_name])
- aws_authenticator_additional_args = var.kubeconfig_aws_authenticator_additional_args
- aws_authenticator_env_variables = var.kubeconfig_aws_authenticator_env_variables
- }) : ""
+ worker_security_group_id = var.create_worker_security_group ? join("", aws_security_group.worker.*.id) : var.worker_security_group_id
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
# launch_template_userdata_rendered = var.create ? [
# for key, group in var.worker_groups : templatefile(
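
The surviving `policy_arn_prefix` local keeps managed-policy ARNs partition-aware (GovCloud, China) rather than hard-coding `arn:aws`. A minimal sketch of the intended use, assuming a worker policy attachment like the ones this module creates:

```hcl
data "aws_partition" "current" {}

locals {
  policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}

# Resolves to arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy in the
# commercial partition, arn:aws-us-gov:... in GovCloud, etc.
resource "aws_iam_role_policy_attachment" "worker_node" {
  role       = aws_iam_role.worker[0].name
  policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
}
```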
diff --git a/main.tf b/main.tf
index 26278d929f..a456eb4b8f 100644
--- a/main.tf
+++ b/main.tf
@@ -66,7 +66,7 @@ resource "aws_cloudwatch_log_group" "this" {
################################################################################
locals {
- cluster_sg_name = coalesce(var.cluster_security_group_name, var.cluster_name)
+ cluster_sg_name = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
create_cluster_sg = var.create && var.create_cluster_security_group
enable_cluster_private_endpoint_sg_access = local.create_cluster_sg && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access
}
@@ -101,7 +101,7 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
- count = local.create_cluster_sg && var.worker_create_security_group ? 1 : 0
+ count = local.create_cluster_sg && var.create_worker_security_group ? 1 : 0
description = "Allow pods to communicate with the EKS cluster API"
protocol = "tcp"
@@ -151,7 +151,7 @@ data "tls_certificate" "this" {
resource "aws_iam_openid_connect_provider" "oidc_provider" {
count = var.create && var.enable_irsa ? 1 : 0
- client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
+ client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences)))
thumbprint_list = [data.tls_certificate.this[0].certificates[0].sha1_fingerprint]
url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
@@ -168,7 +168,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
################################################################################
locals {
- cluster_iam_role_name = coalesce(var.cluster_iam_role_name, var.cluster_name)
+ cluster_iam_role_name = coalesce(var.cluster_iam_role_name, "${var.cluster_name}-cluster")
}
resource "aws_iam_role" "cluster" {
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 6e99610bba..37977082c6 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -6,15 +6,13 @@
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+| [aws](#requirement\_aws) | >= 3.53 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
+| [aws](#provider\_aws) | >= 3.53 |
## Modules
@@ -24,31 +22,104 @@ No modules.
| Name | Type |
|------|------|
-| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
+| [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of parent cluster | `string` | `""` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of parent cluster | `string` | `""` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
-| [ebs\_optimized\_not\_supported](#input\_ebs\_optimized\_not\_supported) | List of instance types that do not support EBS optimization | `list(string)` | `[]` | no |
-| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | `{}` | no |
+| [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids` | `list(string)` | `null` | no |
+| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `list(any)` | `[]` | no |
+| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
+| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
+| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
+| [create](#input\_create) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
+| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
+| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
+| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
+| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
+| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
+| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
+| [desired\_capacity](#input\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `null` | no |
+| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
+| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
+| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
+| [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `null` | no |
+| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
+| [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no |
+| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
+| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
+| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
+| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | The IAM Instance Profile ARN to launch the instance with | `string` | `null` | no |
+| [iam\_instance\_profile\_name](#input\_iam\_instance\_profile\_name) | The name attribute of the IAM instance profile to associate with launched instances | `string` | `null` | no |
+| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `""` | no |
+| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
+| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
+| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `null` | no |
+| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | `null` | no |
+| [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no |
+| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
+| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | n/a | yes |
+| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
+| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
+| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
+| [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds | `number` | `null` | no |
+| [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `null` | no |
+| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | `null` | no |
+| [metrics\_granularity](#input\_metrics\_granularity) | The granularity to associate with the metrics to collect. The only valid value is `1Minute` | `string` | `null` | no |
+| [min\_elb\_capacity](#input\_min\_elb\_capacity) | Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. Updates will not wait on ELB instance number changes | `number` | `null` | no |
+| [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `null` | no |
+| [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Configuration block containing settings to define launch targets for Auto Scaling groups | `any` | `null` | no |
+| [name](#input\_name) | Name used across the resources created | `string` | n/a | yes |
+| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
+| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
+| [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no |
+| [propagate\_name](#input\_propagate\_name) | Determines whether to propagate the ASG Name tag or not | `bool` | `true` | no |
+| [propagate\_tags](#input\_propagate\_tags) | A list of tag blocks. Each element should have keys named key, value, and propagate\_at\_launch | `list(map(string))` | `[]` | no |
+| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no |
+| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
+| [schedules](#input\_schedules) | Map of autoscaling group schedules to create | `map(any)` | `{}` | no |
+| [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones` | `list(string)` | `null` | no |
+| [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `null` | no |
+| [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(any)` | `[]` | no |
+| [tags](#input\_tags) | A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws\_autoscaling\_group requires. | `map(string)` | `{}` | no |
+| [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing | `list(string)` | `[]` | no |
+| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `null` | no |
+| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `string` | `null` | no |
+| [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
+| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| [user\_data](#input\_user\_data) | The Base64-encoded user data to provide when launching the instance; launch templates require user data to be Base64-encoded | `string` | `null` | no |
+| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
+| [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior. | `string` | `null` | no |
+| [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior. | `number` | `null` | no |
+| [warm\_pool](#input\_warm\_pool) | If this block is configured, add a Warm Pool to the specified Auto Scaling group | `any` | `null` | no |
## Outputs
| Name | Description |
|------|-------------|
-| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
-| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
+| [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this AutoScaling Group |
+| [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscale group |
+| [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
+| [autoscaling\_group\_desired\_capacity](#output\_autoscaling\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
+| [autoscaling\_group\_health\_check\_grace\_period](#output\_autoscaling\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
+| [autoscaling\_group\_health\_check\_type](#output\_autoscaling\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
+| [autoscaling\_group\_id](#output\_autoscaling\_group\_id) | The autoscaling group id |
+| [autoscaling\_group\_load\_balancers](#output\_autoscaling\_group\_load\_balancers) | The load balancer names associated with the autoscaling group |
+| [autoscaling\_group\_max\_size](#output\_autoscaling\_group\_max\_size) | The maximum size of the autoscale group |
+| [autoscaling\_group\_min\_size](#output\_autoscaling\_group\_min\_size) | The minimum size of the autoscale group |
+| [autoscaling\_group\_name](#output\_autoscaling\_group\_name) | The autoscaling group name |
+| [autoscaling\_group\_target\_group\_arns](#output\_autoscaling\_group\_target\_group\_arns) | List of Target Group ARNs that apply to this AutoScaling Group |
+| [autoscaling\_group\_vpc\_zone\_identifier](#output\_autoscaling\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
+| [autoscaling\_schedule\_arns](#output\_autoscaling\_schedule\_arns) | ARNs of autoscaling group schedules |
+| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
+| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
+| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
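
Given the reworked inputs above, standalone usage of the submodule would look roughly like the following; the source path, AMI ID, subnet IDs, and security group ID are illustrative placeholders:

```hcl
module "self_managed_node_group" {
  source = "./modules/self-managed-node-group"

  name                 = "example"
  launch_template_name = "example"

  create_launch_template = true
  image_id               = "ami-12345678" # placeholder EKS-optimized AMI ID
  instance_type          = "t3.medium"

  subnet_ids       = ["subnet-abcde012", "subnet-bcde012a"] # placeholders
  min_size         = 1
  max_size         = 3
  desired_capacity = 1

  vpc_security_group_ids = ["sg-0123456789abcdef0"] # placeholder

  tags = {
    Environment = "dev"
  }
}
```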
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 636ead8735..1881d88448 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -1,28 +1,5 @@
locals {
- launch_template_name = var.create_launch_template ? aws_launch_template.this[0].name : var.launch_template_name
launch_template_version = var.create_launch_template && var.launch_template_version == null ? aws_launch_template.this[0].latest_version : var.launch_template_version
-
- tags = concat(
- [
- {
- key = "Name"
- value = var.name
- propagate_at_launch = var.propagate_name
- },
- ],
- var.tags,
- null_resource.tags_as_list_of_maps.*.triggers,
- )
-}
-
-resource "null_resource" "tags_as_list_of_maps" {
- count = length(keys(var.tags_as_map))
-
- triggers = {
- key = keys(var.tags_as_map)[count.index]
- value = values(var.tags_as_map)[count.index]
- propagate_at_launch = true
- }
}
################################################################################
@@ -30,22 +7,22 @@ resource "null_resource" "tags_as_list_of_maps" {
################################################################################
resource "aws_launch_template" "this" {
- count = var.create_launch_template ? 1 : 0
+ count = var.create && var.create_launch_template ? 1 : 0
- name = var.launch_template_use_name_prefix ? null : local.launch_template_name
- name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null
+ name = var.launch_template_use_name_prefix ? null : var.launch_template_name
+ name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
description = var.description
ebs_optimized = var.ebs_optimized
image_id = var.image_id
instance_type = var.instance_type
key_name = var.key_name
- user_data = var.user_data_base64
+ user_data = var.user_data
- vpc_security_group_ids = var.security_group_ids
+ vpc_security_group_ids = var.vpc_security_group_ids
- defaulaunch_template_version = var.defaulaunch_template_version
- update_defaulaunch_template_version = var.update_defaulaunch_template_version
+ default_version = var.default_version
+ update_default_version = var.update_default_version
disable_api_termination = var.disable_api_termination
instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
kernel_id = var.kernel_id
@@ -224,7 +201,7 @@ resource "aws_launch_template" "this" {
create_before_destroy = true
}
- tags = var.tags_as_map
+ tags = var.tags
}
################################################################################
@@ -232,30 +209,29 @@ resource "aws_launch_template" "this" {
################################################################################
resource "aws_autoscaling_group" "this" {
- count = var.create_asg ? 1 : 0
+ count = var.create ? 1 : 0
name = var.use_name_prefix ? null : var.name
name_prefix = var.use_name_prefix ? "${var.name}-" : null
launch_template {
- name = local.launch_template
+ name = var.launch_template_name
version = local.launch_template_version
}
availability_zones = var.availability_zones
vpc_zone_identifier = var.subnet_ids
- min_size = var.min_size
- max_size = var.max_size
- desired_capacity = var.desired_capacity
- capacity_rebalance = var.capacity_rebalance
- min_elb_capacity = var.min_elb_capacity
- wait_for_elb_capacity = var.wait_for_elb_capacity
- wait_for_capacity_timeout = var.wait_for_capacity_timeout
- defaulaunch_template_cooldown = var.defaulaunch_template_cooldown
- protect_from_scale_in = var.protect_from_scale_in
-
- load_balancers = var.load_balancers
+ min_size = var.min_size
+ max_size = var.max_size
+ desired_capacity = var.desired_capacity
+ capacity_rebalance = var.capacity_rebalance
+ min_elb_capacity = var.min_elb_capacity
+ wait_for_elb_capacity = var.wait_for_elb_capacity
+ wait_for_capacity_timeout = var.wait_for_capacity_timeout
+ default_cooldown = var.default_cooldown
+ protect_from_scale_in = var.protect_from_scale_in
+
target_group_arns = var.target_group_arns
placement_group = var.placement_group
health_check_type = var.health_check_type
@@ -273,13 +249,13 @@ resource "aws_autoscaling_group" "this" {
dynamic "initial_lifecycle_hook" {
for_each = var.initial_lifecycle_hooks
content {
- name = initial_lifecycle_hook.value.name
- defaulaunch_template_result = lookup(initial_lifecycle_hook.value, "defaulaunch_template_result", null)
- heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
- lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition
- notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
- notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
- role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null)
+ name = initial_lifecycle_hook.value.name
+ default_result = lookup(initial_lifecycle_hook.value, "default_result", null)
+ heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
+ lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition
+ notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
+ notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
+ role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null)
}
}
@@ -351,18 +327,34 @@ resource "aws_autoscaling_group" "this" {
delete = var.delete_timeout
}
- tags = local.tags
-
lifecycle {
create_before_destroy = true
}
+
+ tags = concat(
+ [
+ {
+ key = "Name"
+ value = var.name
+ propagate_at_launch = var.propagate_name
+ },
+ ],
+ var.propagate_tags,
+ [for k, v in var.tags :
+ {
+ key = k
+ value = v
+ propagate_at_launch = true
+ }
+ ]
+ )
}
################################################################################
# Autoscaling group schedule
################################################################################
resource "aws_autoscaling_schedule" "this" {
- for_each = var.create_asg && var.create_schedule ? var.schedules : {}
+ for_each = var.create && var.create_schedule ? var.schedules : {}
scheduled_action_name = each.key
autoscaling_group_name = aws_autoscaling_group.this[0].name
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 5cf4db92f5..9df5efb98a 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -19,10 +19,9 @@ variable "use_name_prefix" {
default = true
}
-variable "launch_template" {
- description = "Name of an existing launch template to be used (created outside of this module)"
+variable "launch_template_name" {
+ description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
type = string
- default = null
}
variable "launch_template_version" {
@@ -85,7 +84,7 @@ variable "wait_for_capacity_timeout" {
default = null
}
-variable "defaulaunch_template_cooldown" {
+variable "default_cooldown" {
description = "The amount of time, in seconds, after a scaling activity completes before another scaling activity can start"
type = number
default = null
@@ -97,12 +96,6 @@ variable "protect_from_scale_in" {
default = false
}
-variable "load_balancers" {
- description = "A list of elastic load balancer names to add to the autoscaling group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead"
- type = list(string)
- default = []
-}
-
variable "target_group_arns" {
description = "A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing"
type = list(string)
@@ -193,6 +186,12 @@ variable "mixed_instances_policy" {
default = null
}
+variable "warm_pool" {
+ description = "If this block is configured, add a Warm Pool to the specified Auto Scaling group"
+ type = any
+ default = null
+}
+
variable "delete_timeout" {
description = "Delete timeout to wait for destroying autoscaling group"
type = string
@@ -200,51 +199,33 @@ variable "delete_timeout" {
}
variable "tags" {
- description = "A list of tag blocks. Each element should have keys named key, value, and propagate_at_launch"
- type = list(map(string))
- default = []
-}
-
-variable "tags_as_map" {
description = "A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws_autoscaling_group requires."
type = map(string)
default = {}
}
+variable "propagate_tags" {
+ description = "A list of tag blocks. Each element should have keys named key, value, and propagate_at_launch"
+ type = list(map(string))
+ default = []
+}
+
variable "propagate_name" {
description = "Determines whether to propagate the ASG Name tag or not"
type = bool
default = true
}
-variable "warm_pool" {
- description = "If this block is configured, add a Warm Pool to the specified Auto Scaling group"
- type = any
- default = null
-}
-
################################################################################
# Launch template
################################################################################
-variable "create_lt" {
+variable "create_launch_template" {
description = "Determines whether to create launch template or not"
type = bool
default = false
}
-variable "use_lt" {
- description = "Determines whether to use a launch template in the autoscaling group or not"
- type = bool
- default = false
-}
-
-variable "launch_template_name" {
- description = "Name of launch template to be created"
- type = string
- default = ""
-}
-
variable "launch_template_use_name_prefix" {
description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
type = bool
@@ -252,127 +233,127 @@ variable "launch_template_use_name_prefix" {
}
variable "description" {
- description = "(LT) Description of the launch template"
+ description = "Description of the launch template"
type = string
default = null
}
-variable "defaulaunch_template_version" {
- description = "(LT) Default Version of the launch template"
+variable "default_version" {
+ description = "Default Version of the launch template"
type = string
default = null
}
-variable "update_defaulaunch_template_version" {
- description = "(LT) Whether to update Default Version each update. Conflicts with `defaulaunch_template_version`"
+variable "update_default_version" {
+ description = "Whether to update Default Version each update. Conflicts with `default_version`"
type = string
default = null
}
variable "disable_api_termination" {
- description = "(LT) If true, enables EC2 instance termination protection"
+ description = "If true, enables EC2 instance termination protection"
type = bool
default = null
}
variable "instance_initiated_shutdown_behavior" {
- description = "(LT) Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
+ description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
type = string
default = null
}
variable "kernel_id" {
- description = "(LT) The kernel ID"
+ description = "The kernel ID"
type = string
default = null
}
variable "ram_disk_id" {
- description = "(LT) The ID of the ram disk"
+ description = "The ID of the ram disk"
type = string
default = null
}
variable "block_device_mappings" {
- description = "(LT) Specify volumes to attach to the instance besides the volumes specified by the AMI"
+ description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
type = list(any)
default = []
}
variable "capacity_reservation_specification" {
- description = "(LT) Targeting for EC2 capacity reservations"
+ description = "Targeting for EC2 capacity reservations"
type = any
default = null
}
variable "cpu_options" {
- description = "(LT) The CPU options for the instance"
+ description = "The CPU options for the instance"
type = map(string)
default = null
}
variable "credit_specification" {
- description = "(LT) Customize the credit specification of the instance"
+ description = "Customize the credit specification of the instance"
type = map(string)
default = null
}
variable "elastic_gpu_specifications" {
- description = "(LT) The elastic GPU to attach to the instance"
+ description = "The elastic GPU to attach to the instance"
type = map(string)
default = null
}
variable "elastic_inference_accelerator" {
- description = "(LT) Configuration block containing an Elastic Inference Accelerator to attach to the instance"
+ description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance"
type = map(string)
default = null
}
variable "enclave_options" {
- description = "(LT) Enable Nitro Enclaves on launched instances"
+ description = "Enable Nitro Enclaves on launched instances"
type = map(string)
default = null
}
variable "hibernation_options" {
- description = "(LT) The hibernation options for the instance"
+ description = "The hibernation options for the instance"
type = map(string)
default = null
}
variable "iam_instance_profile_arn" {
- description = "(LT) The IAM Instance Profile ARN to launch the instance with"
+ description = "The IAM Instance Profile ARN to launch the instance with"
type = string
default = null
}
variable "instance_market_options" {
- description = "(LT) The market (purchasing) option for the instance"
+ description = "The market (purchasing) option for the instance"
type = any
default = null
}
variable "license_specifications" {
- description = "(LT) A list of license specifications to associate with"
+ description = "A list of license specifications to associate with"
type = map(string)
default = null
}
variable "network_interfaces" {
- description = "(LT) Customize network interfaces to be attached at instance boot time"
+ description = "Customize network interfaces to be attached at instance boot time"
type = list(any)
default = []
}
variable "placement" {
- description = "(LT) The placement of the instance"
+ description = "The placement of the instance"
type = map(string)
default = null
}
variable "tag_specifications" {
- description = "(LT) The tags to apply to the resources during launch"
+ description = "The tags to apply to the resources during launch"
type = list(any)
default = []
}
@@ -408,13 +389,13 @@ variable "key_name" {
default = null
}
-variable "user_data_base64" {
+variable "user_data" {
description = "The Base64-encoded user data to provide when launching the instance. You should use this for Launch Templates instead user_data"
type = string
default = null
}
-variable "security_groups" {
+variable "vpc_security_group_ids" {
description = "A list of security group IDs to associate"
type = list(string)
default = null
diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf
index 5bc71fb16b..009bedfded 100644
--- a/modules/self-managed-node-group/versions.tf
+++ b/modules/self-managed-node-group/versions.tf
@@ -6,10 +6,5 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.53"
}
-
- null = {
- source = "hashicorp/null"
- version = ">= 2.0"
- }
}
}
diff --git a/variables.tf b/variables.tf
index c9ef5c0241..3ed6dcfa46 100644
--- a/variables.tf
+++ b/variables.tf
@@ -142,6 +142,34 @@ variable "cluster_security_group_tags" {
default = {}
}
+################################################################################
+# Worker Security Group
+################################################################################
+
+variable "create_worker_security_group" {
+ description = "Whether to create a security group for the worker nodes"
+ type = bool
+ default = true
+}
+
+variable "worker_security_group_name" {
+ description = "Name to use on worker role created"
+ type = string
+ default = null
+}
+
+variable "worker_security_group_use_name_prefix" {
+ description = "Determines whether the worker security group name (`worker_security_group_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "worker_security_group_tags" {
+ description = "A map of additional tags to add to the worker security group created"
+ type = map(string)
+ default = {}
+}
+
################################################################################
# IRSA
################################################################################
@@ -291,38 +319,18 @@ variable "fargate_tags" {
}
################################################################################
-# Node Groups
+# Self Managed Node Group
################################################################################
-
-
-
-
-
-
-variable "default_platform" {
- description = "Default platform name. Valid options are `linux` and `windows`"
- type = string
- default = "linux"
-}
-
-variable "launch_templates" {
- description = "Map of launch template definitions to create"
- type = map(any)
- default = {}
-}
-
-variable "worker_groups" {
- description = "A map of maps defining worker group configurations to be defined using AWS Launch Template"
+variable "self_managed_node_groups" {
+ description = "Map of self-managed node group definitions to create"
type = any
default = {}
}
-variable "group_default_settings" {
- description = "Override default values for autoscaling group, node group settings"
- type = any
- default = {}
-}
+
+
+
variable "worker_security_group_id" {
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster"
@@ -336,24 +344,12 @@ variable "worker_ami_name_filter" {
default = ""
}
-variable "worker_ami_name_filter_windows" {
- description = "Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used"
- type = string
- default = ""
-}
-
variable "worker_ami_owner_id" {
description = "The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')"
type = string
default = "amazon"
}
-variable "worker_ami_owner_id_windows" {
- description = "The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')"
- type = string
- default = "amazon"
-}
-
# variable "worker_additional_security_group_ids" {
# description = "A list of additional security group ids to attach to worker instances"
# type = list(string)
@@ -371,49 +367,6 @@ variable "worker_additional_policies" {
type = list(string)
default = []
}
-variable "kubeconfig_api_version" {
- description = "KubeConfig API version. Defaults to client.authentication.k8s.io/v1alpha1"
- type = string
- default = "client.authentication.k8s.io/v1alpha1"
-
-}
-variable "kubeconfig_aws_authenticator_command" {
- description = "Command to use to fetch AWS EKS credentials"
- type = string
- default = "aws-iam-authenticator"
-}
-
-variable "kubeconfig_aws_authenticator_command_args" {
- description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]"
- type = list(string)
- default = []
-}
-
-variable "kubeconfig_aws_authenticator_additional_args" {
- description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]"
- type = list(string)
- default = []
-}
-
-variable "kubeconfig_aws_authenticator_env_variables" {
- description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}"
- type = map(string)
- default = {}
-}
-
-variable "kubeconfig_name" {
- description = "Override the default name used for items kubeconfig"
- type = string
- default = ""
-}
-
-
-
-variable "worker_create_security_group" {
- description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`"
- type = bool
- default = true
-}
variable "worker_create_cluster_primary_security_group_rules" {
description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group"
@@ -421,18 +374,6 @@ variable "worker_create_cluster_primary_security_group_rules" {
default = false
}
-variable "permissions_boundary" {
- description = "If provided, all IAM roles will be created with this permissions boundary attached"
- type = string
- default = null
-}
-
-variable "iam_path" {
- description = "If provided, all IAM roles will be created on this path"
- type = string
- default = "/"
-}
-
variable "cluster_create_endpoint_private_access_sg_rule" {
description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted"
type = bool
@@ -451,38 +392,12 @@ variable "cluster_endpoint_private_access_sg" {
default = null
}
-
-
-variable "manage_worker_iam_resources" {
- description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers"
- type = bool
- default = true
-}
-
-variable "worker_role_name" {
- description = "User defined workers role name"
- type = string
- default = ""
-}
-
variable "attach_worker_cni_policy" {
description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
type = bool
default = true
}
-variable "node_groups_defaults" {
- description = "Map of values to be applied to all node groups. See `node_groups` module's documentation for more details"
- type = any
- default = {}
-}
-
-variable "node_groups" {
- description = "Map of map of node groups to create. See `node_groups` module's documentation for more details"
- type = any
- default = {}
-}
-
variable "cluster_egress_cidrs" {
description = "List of CIDR blocks that are permitted for cluster egress traffic"
type = list(string)
diff --git a/workers.tf b/workers.tf
index b9cc3654bd..23dd494fae 100644
--- a/workers.tf
+++ b/workers.tf
@@ -21,521 +21,127 @@ module "fargate" {
}
################################################################################
-# Fargate
+# EKS Managed Node Group
################################################################################
-locals {
- # Abstracted to a local so that it can be shared with node group as well
- # Only valus that are common between ASG and Node Group are pulled out to this local map
- group_default_settings = {
- min_size = try(var.group_default_settings.min_size, 1)
- max_size = try(var.group_default_settings.max_size, 3)
- desired_capacity = try(var.group_default_settings.desired_capacity, 1)
- }
-}
-
-resource "aws_launch_template" "this" {
- for_each = var.create ? var.launch_templates : {}
-
- name_prefix = "${aws_eks_cluster.this[0].name}-${try(each.value.name, each.key)}"
- description = try(each.value.description, var.group_default_settings.description, null)
-
- ebs_optimized = try(each.value.ebs_optimized, var.group_default_settings.ebs_optimized, null)
- image_id = try(each.value.image_id, var.group_default_settings.image_id, data.aws_ami.eks_worker[0].image_id)
- instance_type = try(each.value.instance_type, var.group_default_settings.instance_type, "m6i.large")
- key_name = try(each.value.key_name, var.group_default_settings.key_name, null)
- user_data = try(each.value.user_data, var.group_default_settings.user_data, null)
-
- vpc_security_group_ids = compact(concat(
- [try(aws_security_group.worker[0].id, "")],
- try(each.value.vpc_security_group_ids, var.group_default_settings.vpc_security_group_ids, [])
- ))
-
- default_version = try(each.value.default_version, var.group_default_settings.default_version, null)
- update_default_version = try(each.value.update_default_version, var.group_default_settings.update_default_version, null)
- disable_api_termination = try(each.value.disable_api_termination, var.group_default_settings.disable_api_termination, null)
- instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.group_default_settings.instance_initiated_shutdown_behavior, null)
- kernel_id = try(each.value.kernel_id, var.group_default_settings.kernel_id, null)
- ram_disk_id = try(each.value.ram_disk_id, var.group_default_settings.ram_disk_id, null)
-
- dynamic "block_device_mappings" {
- for_each = try(each.value.block_device_mappings, var.group_default_settings.block_device_mappings, [])
- content {
- device_name = block_device_mappings.value.device_name
- no_device = lookup(block_device_mappings.value, "no_device", null)
- virtual_name = lookup(block_device_mappings.value, "virtual_name", null)
-
- dynamic "ebs" {
- for_each = flatten([lookup(block_device_mappings.value, "ebs", [])])
- content {
- delete_on_termination = lookup(ebs.value, "delete_on_termination", null)
- encrypted = lookup(ebs.value, "encrypted", null)
- kms_key_id = lookup(ebs.value, "kms_key_id", null)
- iops = lookup(ebs.value, "iops", null)
- throughput = lookup(ebs.value, "throughput", null)
- snapshot_id = lookup(ebs.value, "snapshot_id", null)
- volume_size = lookup(ebs.value, "volume_size", null)
- volume_type = lookup(ebs.value, "volume_type", null)
- }
- }
- }
- }
-
- dynamic "capacity_reservation_specification" {
- for_each = try(each.value.capacity_reservation_specification, var.group_default_settings.capacity_reservation_specification, [])
- content {
- capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)
-
- dynamic "capacity_reservation_target" {
- for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", [])
- content {
- capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null)
- }
- }
- }
- }
-
- dynamic "cpu_options" {
- for_each = try(each.value.cpu_options, var.group_default_settings.cpu_options, [])
- content {
- core_count = cpu_options.value.core_count
- threads_per_core = cpu_options.value.threads_per_core
- }
- }
-
- dynamic "credit_specification" {
- for_each = try(each.value.credit_specification, var.group_default_settings.credit_specification, [])
- content {
- cpu_credits = credit_specification.value.cpu_credits
- }
- }
-
- dynamic "elastic_gpu_specifications" {
- for_each = try(each.value.elastic_gpu_specifications, var.group_default_settings.elastic_gpu_specifications, [])
- content {
- type = elastic_gpu_specifications.value.type
- }
- }
-
- dynamic "elastic_inference_accelerator" {
- for_each = try(each.value.elastic_inference_accelerator, var.group_default_settings.elastic_inference_accelerator, [])
- content {
- type = elastic_inference_accelerator.value.type
- }
- }
-
- dynamic "enclave_options" {
- for_each = try(each.value.enclave_options, var.group_default_settings.enclave_options, [])
- content {
- enabled = enclave_options.value.enabled
- }
- }
-
- dynamic "hibernation_options" {
- for_each = try(each.value.hibernation_options, var.group_default_settings.hibernation_options, [])
- content {
- configured = hibernation_options.value.configured
- }
- }
-
- dynamic "iam_instance_profile" {
- for_each = [{
- "arn" = try(each.value.iam_instance_profile_arn, aws_iam_instance_profile.worker[0].arn, {})
- }]
- content {
- arn = lookup(iam_instance_profile.value, "arn", null)
- }
- }
-
- dynamic "instance_market_options" {
- for_each = try(each.value.instance_market_options, var.group_default_settings.instance_market_options, [])
- content {
- market_type = instance_market_options.value.market_type
-
- dynamic "spot_options" {
- for_each = try([instance_market_options.value.spot_options], [])
- content {
- block_duration_minutes = spot_options.value.block_duration_minutes
- instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
- max_price = lookup(spot_options.value, "max_price", null)
- spot_instance_type = lookup(spot_options.value, "spot_instance_type", null)
- valid_until = lookup(spot_options.value, "valid_until", null)
- }
- }
- }
- }
-
- dynamic "license_specification" {
- for_each = try(each.value.license_specifications, var.group_default_settings.license_specifications, [])
- content {
- license_configuration_arn = license_specifications.value.license_configuration_arn
- }
- }
-
- dynamic "metadata_options" {
- for_each = try([each.value.metadata_options], [var.group_default_settings.metadata_options], [])
- content {
- http_endpoint = lookup(metadata_options.value, "http_endpoint", null)
- http_tokens = lookup(metadata_options.value, "http_tokens", null)
- http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null)
- }
- }
-
- dynamic "monitoring" {
- for_each = try(each.value.enable_monitoring, var.group_default_settings.enable_monitoring, [])
- content {
- enabled = each.value
- }
- }
-
- dynamic "network_interfaces" {
- for_each = try(each.value.network_interfaces, var.group_default_settings.network_interfaces, [])
- iterator = interface
- content {
- associate_carrier_ip_address = lookup(interface.value, "associate_carrier_ip_address", null)
- associate_public_ip_address = lookup(interface.value, "associate_public_ip_address", null)
- delete_on_termination = lookup(interface.value, "delete_on_termination", null)
- description = lookup(interface.value, "description", null)
- device_index = lookup(interface.value, "device_index", null)
- ipv4_addresses = lookup(interface.value, "ipv4_addresses", null) != null ? interface.value.ipv4_addresses : []
- ipv4_address_count = lookup(interface.value, "ipv4_address_count", null)
- ipv6_addresses = lookup(interface.value, "ipv6_addresses", null) != null ? interface.value.ipv6_addresses : []
- ipv6_address_count = lookup(interface.value, "ipv6_address_count", null)
- network_interface_id = lookup(interface.value, "network_interface_id", null)
- private_ip_address = lookup(interface.value, "private_ip_address", null)
- security_groups = lookup(interface.value, "security_groups", null) != null ? interface.value.security_groups : []
- subnet_id = lookup(interface.value, "subnet_id", null)
- }
- }
-
- dynamic "placement" {
- for_each = try(each.value.placement, var.group_default_settings.placement, [])
- content {
- affinity = lookup(placement.value, "affinity", null)
- availability_zone = lookup(placement.value, "availability_zone", null)
- group_name = lookup(placement.value, "group_name", null)
- host_id = lookup(placement.value, "host_id", null)
- spread_domain = lookup(placement.value, "spread_domain", null)
- tenancy = lookup(placement.value, "tenancy", null)
- partition_number = lookup(placement.value, "partition_number", null)
- }
- }
+# module "eks_managed_node_groups" {
+# source = "./modules/eks-managed-node-group"
- # tag_specifications {
- # resource_type = "volume"
- # tags = merge(
- # var.tags,
- # lookup(each.value, "tags", {}),
- # { "Name" = try(each.value.name, "${aws_eks_cluster.this[0].name}-${each.key}") }
- # )
- # }
-
- # tag_specifications {
- # resource_type = "instance"
- # tags = merge(
- # var.tags,
- # lookup(each.value, "tags", {}),
- # { "Name" = try(each.value.name, "${aws_eks_cluster.this[0].name}-${each.key}") }
- # )
- # }
-
- # tag_specifications {
- # resource_type = "network-interface"
- # tags = merge(
- # var.tags,
- # lookup(each.value, "tags", {}),
- # { "Name" = try(each.value.name, "${aws_eks_cluster.this[0].name}-${each.key}") }
- # )
- # }
-
- # Prevent premature access of security group roles and policies by pods that
- # require permissions on create/destroy that depend on worker.
- depends_on = [
- aws_security_group_rule.worker_egress_internet,
- aws_security_group_rule.worker_ingress_self,
- aws_security_group_rule.worker_ingress_cluster,
- aws_security_group_rule.worker_ingress_cluster_kubelet,
- aws_security_group_rule.worker_ingress_cluster_https,
- aws_security_group_rule.worker_ingress_cluster_primary,
- aws_security_group_rule.cluster_primary_ingress_worker,
- ]
+# create_eks = var.create_eks
- lifecycle {
- create_before_destroy = true
- }
+# cluster_name = local.cluster_name
+# cluster_endpoint = local.cluster_endpoint
+# cluster_auth_base64 = local.cluster_auth_base64
- # tags = merge(var.tags, lookup(each.value, "tags", {}))
-}
+# default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
+# ebs_optimized_not_supported = local.ebs_optimized_not_supported
+# workers_group_defaults = local.workers_group_defaults
+# worker_security_group_id = local.worker_security_group_id
+# worker_additional_security_group_ids = var.worker_additional_security_group_ids
-################################################################################
-# Node Groups
-################################################################################
+# node_groups_defaults = var.node_groups_defaults
+# node_groups = var.node_groups
+# tags = var.tags
-# resource "aws_eks_node_group" "worker" {
-# for_each = var.create : var.node_groups : {}
-
-# node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
-# node_group_name = lookup(each.value, "name_prefix", null)
-
-# cluster_name = var.cluster_name
-# node_role_arn = try(each.value.iam_role_arn, var.default_iam_role_arn)
-# subnet_ids = coalescelist(each.value["subnet_ids"], var.subnet_ids, [""])
-
-# scaling_config {
-# desired_size = each.value["desired_capacity"]
-# max_size = each.value["max_capacity"]
-# min_size = each.value["min_capacity"]
-# }
-
-# ami_type = lookup(each.value, "ami_type", null)
-# disk_size = lookup(each.value, "disk_size", null)
-# instance_types = lookup(each.value, "instance_types", null)
-# release_version = lookup(each.value, "ami_release_version", null)
-# capacity_type = lookup(each.value, "capacity_type", null)
-# force_update_version = lookup(each.value, "force_update_version", null)
-
-# dynamic "remote_access" {
-# for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
-# ec2_ssh_key = each.value["key_name"]
-# source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
-# }] : []
-
-# content {
-# ec2_ssh_key = remote_access.value["ec2_ssh_key"]
-# source_security_group_ids = remote_access.value["source_security_group_ids"]
-# }
-# }
-
-# dynamic "launch_template" {
-# for_each = [try(each.value.launch_template, {})]
-
-# content {
-# id = lookup(launch_template.value, "id", null)
-# name = lookup(launch_template.value, "name", null)
-# version = launch_template.value.version
-# }
-# }
-
-# dynamic "taint" {
-# for_each = each.value["taints"]
-
-# content {
-# key = taint.value["key"]
-# value = taint.value["value"]
-# effect = taint.value["effect"]
-# }
-# }
-
-# dynamic "update_config" {
-# for_each = try(each.value.update_config.max_unavailable_percentage > 0, each.value.update_config.max_unavailable > 0, false) ? [true] : []
-
-# content {
-# max_unavailable_percentage = try(each.value.update_config.max_unavailable_percentage, null)
-# max_unavailable = try(each.value.update_config.max_unavailable, null)
-# }
-# }
-
-# timeouts {
-# create = lookup(each.value["timeouts"], "create", null)
-# update = lookup(each.value["timeouts"], "update", null)
-# delete = lookup(each.value["timeouts"], "delete", null)
-# }
-
-# version = lookup(each.value, "version", null)
-
-# labels = merge(
-# lookup(var.node_groups_defaults, "k8s_labels", {}),
-# lookup(each.value, "k8s_labels", {})
-# )
-
-# tags = merge(
-# var.tags,
-# lookup(var.node_groups_defaults, "additional_tags", {}),
-# lookup(each.value, "additional_tags", {}),
-# )
-
-# lifecycle {
-# create_before_destroy = true
-# ignore_changes = [scaling_config[0].desired_size]
-# }
+# depends_on = [
+# aws_eks_cluster.this,
+# ]
# }
################################################################################
-# Autoscaling Group
+# Self Managed Node Group
################################################################################
-resource "aws_autoscaling_group" "this" {
- for_each = var.create ? var.worker_groups : object({})
-
- name_prefix = "${join("-", [aws_eks_cluster.this[0].name, try(each.value.name, each.key)])}-"
-
- launch_template {
- name = try(
- aws_launch_template.this[each.value.launch_template_key].name,
- each.value.launch_template_name,
- # defaults should be last
- aws_launch_template.this[var.group_default_settings.launch_template_key].name,
- var.group_default_settings.launch_template_name,
- )
- version = try(each.value.launch_template_version, var.group_default_settings.launch_template_version, "$Latest")
- }
-
- availability_zones = try(each.value.availability_zones, var.group_default_settings.availability_zones, null)
- vpc_zone_identifier = try(each.value.vpc_zone_identifier, var.group_default_settings.vpc_zone_identifier, var.subnet_ids)
-
- min_size = try(each.value.min_size, local.group_default_settings.min_size)
- max_size = try(each.value.max_size, local.group_default_settings.max_size)
- desired_capacity = try(each.value.desired_capacity, local.group_default_settings.desired_capacity)
- capacity_rebalance = try(each.value.capacity_rebalance, var.group_default_settings.capacity_rebalance, null)
- default_cooldown = try(each.value.default_cooldown, var.group_default_settings.default_cooldown, null)
- protect_from_scale_in = try(each.value.protect_from_scale_in, var.group_default_settings.protect_from_scale_in, null)
-
- load_balancers = try(each.value.load_balancers, var.group_default_settings.load_balancers, null)
- target_group_arns = try(each.value.target_group_arns, var.group_default_settings.target_group_arns, null)
- placement_group = try(each.value.placement_group, var.group_default_settings.placement_group, null)
- health_check_type = try(each.value.health_check_type, var.group_default_settings.health_check_type, null)
- health_check_grace_period = try(each.value.health_check_grace_period, var.group_default_settings.health_check_grace_period, null)
-
- force_delete = try(each.value.force_delete, var.group_default_settings.force_delete, false)
- termination_policies = try(each.value.termination_policies, var.group_default_settings.termination_policies, null)
- suspended_processes = try(each.value.suspended_processes, var.group_default_settings.suspended_processes, ["AZRebalance"])
- max_instance_lifetime = try(each.value.max_instance_lifetime, var.group_default_settings.max_instance_lifetime, null)
-
- enabled_metrics = try(each.value.enabled_metrics, var.group_default_settings.enabled_metrics, null)
- metrics_granularity = try(each.value.metrics_granularity, var.group_default_settings.metrics_granularity, null)
- service_linked_role_arn = try(each.value.service_linked_role_arn, var.group_default_settings.service_linked_role_arn, null)
-
- dynamic "initial_lifecycle_hook" {
- for_each = try(each.value.initial_lifecycle_hook, var.group_default_settings.initial_lifecycle_hook, {})
- iterator = hook
-
- content {
- name = hook.value.name
- default_result = lookup(hook.value, "default_result", null)
- heartbeat_timeout = lookup(hook.value, "heartbeat_timeout", null)
- lifecycle_transition = hook.value.lifecycle_transition
- notification_metadata = lookup(hook.value, "notification_metadata", null)
- notification_target_arn = lookup(hook.value, "notification_target_arn", null)
- role_arn = lookup(hook.value, "role_arn", null)
- }
- }
-
- dynamic "instance_refresh" {
- for_each = try(each.value.instance_refresh, var.group_default_settings.instance_refresh, {})
- iterator = refresh
-
- content {
- strategy = refresh.value.strategy
- triggers = lookup(refresh.value, "triggers", null)
-
- dynamic "preferences" {
- for_each = try(refresh.value.preferences, [])
- content {
- instance_warmup = lookup(preferences.value, "instance_warmup", null)
- min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
- }
- }
- }
- }
-
- dynamic "mixed_instances_policy" {
- for_each = try(each.value.mixed_instances_policy, var.group_default_settings.mixed_instances_policy, {})
- iterator = mixed
-
- content {
- dynamic "instances_distribution" {
- for_each = try(mixed.value.instances_distribution, {})
- iterator = distro
-
- content {
- on_demand_allocation_strategy = lookup(distro.value, "on_demand_allocation_strategy", null)
- on_demand_base_capacity = lookup(distro.value, "on_demand_base_capacity", null)
- on_demand_percentage_above_base_capacity = lookup(distro.value, "on_demand_percentage_above_base_capacity", null)
- spot_allocation_strategy = lookup(distro.value, "spot_allocation_strategy", null)
- spot_instance_pools = lookup(distro.value, "spot_instance_pools", null)
- spot_max_price = lookup(distro.value, "spot_max_price", null)
- }
- }
-
- launch_template {
- launch_template_specification {
- launch_template_name = local.launch_template
- version = local.launch_template_version
- }
-
- dynamic "override" {
- for_each = try(mixed.value.override, {})
- content {
- instance_type = lookup(override.value, "instance_type", null)
- weighted_capacity = lookup(override.value, "weighted_capacity", null)
-
- dynamic "launch_template_specification" {
- for_each = try(override.value.launch_template_specification, {})
- content {
- launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null)
- }
- }
- }
- }
- }
- }
- }
-
- dynamic "warm_pool" {
- for_each = try(each.value.warm_pool, var.group_default_settings.warm_pool, {})
-
- content {
- pool_state = lookup(warm_pool.value, "pool_state", null)
- min_size = lookup(warm_pool.value, "min_size", null)
- max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
- }
- }
-
- dynamic "tag" {
- for_each = concat(
- [
- {
- "key" = "Name"
- "value" = "${join("-", [aws_eks_cluster.this[0].name, lookup(each.value, "name", each.key)])}-eks-asg"
- "propagate_at_launch" = true
- },
- {
- "key" = "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}"
- "value" = "owned"
- "propagate_at_launch" = true
- },
- {
- "key" = "k8s.io/cluster/${aws_eks_cluster.this[0].name}"
- "value" = "owned"
- "propagate_at_launch" = true
- },
- ],
- [
- for k, v in merge(var.tags, lookup(each.value, "tags", {})) :
- tomap({
- key = k
- value = v
- propagate_at_launch = true
- })
- ],
- lookup(each.value, "propogated_tags", [])
- )
- content {
- key = tag.value.key
- value = tag.value.value
- propagate_at_launch = tag.value.propagate_at_launch
- }
- }
-
- lifecycle {
- create_before_destroy = true
- ignore_changes = [desired_capacity]
- }
-
- depends_on = [
- aws_launch_template.this
- ]
+module "self_managed_node_group" {
+ source = "./modules/self-managed-node-group"
+
+ for_each = var.create ? var.self_managed_node_groups : {}
+
+ create = var.create
+
+ # Autoscaling Group
+ name = try(each.value.name, "TODO")
+ use_name_prefix = try(each.value.use_name_prefix, null)
+
+ launch_template_name = try(each.value.launch_template_name, var.cluster_name)
+ launch_template_version = try(each.value.launch_template_version, null)
+ availability_zones = try(each.value.availability_zones, null)
+ subnet_ids = try(each.value.subnet_ids, null)
+
+ min_size = try(each.value.min_size, null)
+ max_size = try(each.value.max_size, null)
+ desired_capacity = try(each.value.desired_capacity, null)
+ capacity_rebalance = try(each.value.capacity_rebalance, null)
+ min_elb_capacity = try(each.value.min_elb_capacity, null)
+ wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, null)
+ wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, null)
+ default_cooldown = try(each.value.default_cooldown, null)
+ protect_from_scale_in = try(each.value.protect_from_scale_in, null)
+
+ target_group_arns = try(each.value.target_group_arns, null)
+ placement_group = try(each.value.placement_group, null)
+ health_check_type = try(each.value.health_check_type, null)
+ health_check_grace_period = try(each.value.health_check_grace_period, null)
+
+ force_delete = try(each.value.force_delete, null)
+ termination_policies = try(each.value.termination_policies, null)
+ suspended_processes = try(each.value.suspended_processes, null)
+ max_instance_lifetime = try(each.value.max_instance_lifetime, null)
+
+ enabled_metrics = try(each.value.enabled_metrics, null)
+ metrics_granularity = try(each.value.metrics_granularity, null)
+ service_linked_role_arn = try(each.value.service_linked_role_arn, null)
+
+ initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, null)
+ instance_refresh = try(each.value.instance_refresh, null)
+ use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, null)
+ warm_pool = try(each.value.warm_pool, null)
+
+ create_schedule = try(each.value.create_schedule, null)
+ schedules = try(each.value.schedules, null)
+
+ delete_timeout = try(each.value.delete_timeout, null)
+
+ # Launch Template
+ description = try(each.value.description, null)
+
+ ebs_optimized = try(each.value.ebs_optimized, null)
+ image_id = try(each.value.image_id, null)
+ instance_type = try(each.value.instance_type, null)
+ key_name = try(each.value.key_name, null)
+ user_data = try(each.value.user_data, null)
+
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, null)
+
+ default_version = try(each.value.default_version, null)
+ update_default_version = try(each.value.update_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, null)
+ instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, null)
+ kernel_id = try(each.value.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, null)
+
+ block_device_mappings = try(each.value.block_device_mappings, null)
+ capacity_reservation_specification = try(each.value.capacity_reservation_specification, null)
+ cpu_options = try(each.value.cpu_options, null)
+ credit_specification = try(each.value.credit_specification, null)
+ elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, null)
+ elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, null)
+ enclave_options = try(each.value.enclave_options, null)
+ hibernation_options = try(each.value.hibernation_options, null)
+ iam_instance_profile_name = try(each.value.iam_instance_profile_name, null)
+ iam_instance_profile_arn = try(each.value.iam_instance_profile_arn, null)
+ instance_market_options = try(each.value.instance_market_options, null)
+ license_specifications = try(each.value.license_specifications, null)
+ metadata_options = try(each.value.metadata_options, null)
+ enable_monitoring = try(each.value.enable_monitoring, null)
+ network_interfaces = try(each.value.network_interfaces, null)
+ placement = try(each.value.placement, null)
+ tag_specifications = try(each.value.tag_specifications, null)
+
+ tags = try(each.value.tags, null)
+ propagate_tags = try(each.value.propagate_tags, [])
+ propagate_name = try(each.value.propagate_name, null)
}
################################################################################
@@ -543,7 +149,7 @@ resource "aws_autoscaling_group" "this" {
################################################################################
locals {
- worker_iam_role_name = coalesce(var.worker_iam_role_name, var.cluster_name)
+ worker_iam_role_name = coalesce(var.worker_iam_role_name, "${var.cluster_name}-worker")
}
resource "aws_iam_role" "worker" {
@@ -574,7 +180,7 @@ data "aws_iam_policy_document" "worker_assume_role_policy" {
principals {
type = "Service"
- identifiers = [local.ec2_principal]
+ identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"]
}
}
}
@@ -599,21 +205,25 @@ resource "aws_iam_instance_profile" "worker" {
################################################################################
locals {
- create_worker_sg = var.create && var.worker_create_security_group
+ worker_sg_name = coalesce(var.worker_security_group_name, "${var.cluster_name}-worker")
+ create_worker_sg = var.create && var.create_worker_security_group
}
resource "aws_security_group" "worker" {
count = local.create_worker_sg ? 1 : 0
- name_prefix = var.cluster_name
- description = "Security group for all nodes in the cluster."
+ name = var.worker_security_group_use_name_prefix ? null : local.worker_sg_name
+ name_prefix = var.worker_security_group_use_name_prefix ? try("${local.worker_sg_name}-", local.worker_sg_name) : null
+ description = "EKS worker security group"
vpc_id = var.vpc_id
+
tags = merge(
var.tags,
{
- "Name" = "${var.cluster_name}-eks_worker_sg"
+ "Name" = local.worker_sg_name
"kubernetes.io/cluster/${var.cluster_name}" = "owned"
},
+ var.worker_security_group_tags
)
}
@@ -683,7 +293,7 @@ resource "aws_security_group_rule" "worker_ingress_cluster_primary" {
description = "Allow pods running on worker to receive communication from cluster primary security group (e.g. Fargate pods)."
protocol = "all"
security_group_id = local.worker_security_group_id
- source_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
+ source_security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
from_port = 0
to_port = 65535
type = "ingress"
From 77b64d3d24076654fd127adb6f29ac707c1eef8d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 9 Nov 2021 20:03:08 -0500
Subject: [PATCH 13/83] chore: updates from testing
---
examples/self_managed_node_groups/main.tf | 68 +++++++-------------
modules/self-managed-node-group/README.md | 4 +-
modules/self-managed-node-group/main.tf | 31 ++++++---
modules/self-managed-node-group/variables.tf | 13 ++--
workers.tf | 36 +++++------
5 files changed, 69 insertions(+), 83 deletions(-)
diff --git a/examples/self_managed_node_groups/main.tf b/examples/self_managed_node_groups/main.tf
index 1b2fbf6a55..f922dd97e5 100644
--- a/examples/self_managed_node_groups/main.tf
+++ b/examples/self_managed_node_groups/main.tf
@@ -29,52 +29,28 @@ module "eks" {
# disk_size = 50
# }
- # self_managed_node_groups = {
- # example1 = {
- # desired_capacity = 1
- # max_capacity = 10
- # min_capacity = 1
-
- # instance_types = ["t3.large"]
- # capacity_type = "SPOT"
- # k8s_labels = {
- # Example = "managed_node_groups"
- # GithubRepo = "terraform-aws-eks"
- # GithubOrg = "terraform-aws-modules"
- # }
- # additional_tags = {
- # ExtraTag = "example"
- # }
- # taints = [
- # {
- # key = "dedicated"
- # value = "gpuGroup"
- # effect = "NO_SCHEDULE"
- # }
- # ]
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
- # }
- # example2 = {
- # desired_capacity = 1
- # max_capacity = 10
- # min_capacity = 1
-
- # instance_types = ["t3.medium"]
- # k8s_labels = {
- # Example = "managed_node_groups"
- # GithubRepo = "terraform-aws-eks"
- # GithubOrg = "terraform-aws-modules"
- # }
- # additional_tags = {
- # ExtraTag = "example2"
- # }
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
- # }
- # }
+ self_managed_node_groups = {
+ example1 = {
+ min_size = 1
+ max_size = 10
+ desired_capacity = 1
+
+ instance_type = "t3.large"
+ # capacity_type = "SPOT"
+ # additional_tags = {
+ # ExtraTag = "example"
+ # }
+ }
+ example2 = {
+ min_size = 1
+ max_size = 10
+ desired_capacity = 1
+ instance_type = "t3.medium"
+ # additional_tags = {
+ # ExtraTag = "example2"
+ # }
+ }
+ }
tags = {
Example = local.name
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 37977082c6..6a076eb6df 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -34,9 +34,10 @@ No modules.
| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `list(any)` | `[]` | no |
| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster that the node group will be associated with | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
-| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
@@ -80,7 +81,6 @@ No modules.
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
| [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no |
-| [propagate\_name](#input\_propagate\_name) | Determines whether to propagate the ASG Name tag or not | `bool` | `true` | no |
| [propagate\_tags](#input\_propagate\_tags) | A list of tag blocks. Each element should have keys named key, value, and propagate\_at\_launch | `list(map(string))` | `[]` | no |
| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 1881d88448..542f0edc2f 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -1,7 +1,3 @@
-locals {
- launch_template_version = var.create_launch_template && var.launch_template_version == null ? aws_launch_template.this[0].latest_version : var.launch_template_version
-}
-
################################################################################
# Launch template
################################################################################
@@ -190,10 +186,10 @@ resource "aws_launch_template" "this" {
}
dynamic "tag_specifications" {
- for_each = var.tag_specifications
+ for_each = toset(["instance", "volume", "network-interface"])
content {
- resource_type = tag_specifications.value.resource_type
- tags = tag_specifications.value.tags
+ resource_type = tag_specifications.key
+ tags = merge(var.tags, { Name = var.name })
}
}
@@ -208,6 +204,11 @@ resource "aws_launch_template" "this" {
# Autoscaling group
################################################################################
+locals {
+ launch_template_name = var.create_launch_template ? aws_launch_template.this[0].name : var.launch_template_name
+ launch_template_version = var.create_launch_template && var.launch_template_version == null ? aws_launch_template.this[0].latest_version : var.launch_template_version
+}
+
resource "aws_autoscaling_group" "this" {
count = var.create ? 1 : 0
@@ -215,7 +216,7 @@ resource "aws_autoscaling_group" "this" {
name_prefix = var.use_name_prefix ? "${var.name}-" : null
launch_template {
- name = var.launch_template_name
+ name = local.launch_template_name
version = local.launch_template_version
}
@@ -292,7 +293,7 @@ resource "aws_autoscaling_group" "this" {
launch_template {
launch_template_specification {
- launch_template_name = local.launch_template
+ launch_template_name = local.launch_template_name
version = local.launch_template_version
}
@@ -336,7 +337,17 @@ resource "aws_autoscaling_group" "this" {
{
key = "Name"
value = var.name
- propagate_at_launch = var.propagate_name
+ propagate_at_launch = true
+ },
+ {
+ key = "kubernetes.io/cluster/${var.cluster_name}"
+ value = "owned"
+ propagate_at_launch = true
+ },
+ {
+ key = "k8s.io/cluster/${var.cluster_name}"
+ value = "owned"
+ propagate_at_launch = true
},
],
var.propagate_tags,
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 9df5efb98a..31511d5e48 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -198,6 +198,11 @@ variable "delete_timeout" {
default = null
}
+variable "cluster_name" {
+ description = "Name of the EKS cluster that the node group will be associated with"
+ type = string
+ default = null
+}
variable "tags" {
description = "A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws_autoscaling_group requires."
type = map(string)
@@ -210,12 +215,6 @@ variable "propagate_tags" {
default = []
}
-variable "propagate_name" {
- description = "Determines whether to propagate the ASG Name tag or not"
- type = bool
- default = true
-}
-
################################################################################
# Launch template
################################################################################
@@ -223,7 +222,7 @@ variable "propagate_name" {
variable "create_launch_template" {
description = "Determines whether to create launch template or not"
type = bool
- default = false
+ default = true
}
variable "launch_template_use_name_prefix" {
diff --git a/workers.tf b/workers.tf
index 23dd494fae..59cd9c16da 100644
--- a/workers.tf
+++ b/workers.tf
@@ -58,20 +58,20 @@ module "self_managed_node_group" {
for_each = var.create ? var.self_managed_node_groups : {}
- create = var.create
+ cluster_name = var.cluster_name
# Autoscaling Group
- name = try(each.value.name, "TODO")
- use_name_prefix = try(each.value.use_name_prefix, null)
+ name = try(each.value.name, var.cluster_name)
+ use_name_prefix = try(each.value.use_name_prefix, false)
launch_template_name = try(each.value.launch_template_name, var.cluster_name)
launch_template_version = try(each.value.launch_template_version, null)
availability_zones = try(each.value.availability_zones, null)
- subnet_ids = try(each.value.subnet_ids, null)
+ subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
- min_size = try(each.value.min_size, null)
- max_size = try(each.value.max_size, null)
- desired_capacity = try(each.value.desired_capacity, null)
+ min_size = try(each.value.min_size, 0)
+ max_size = try(each.value.max_size, 0)
+ desired_capacity = try(each.value.desired_capacity, 0)
capacity_rebalance = try(each.value.capacity_rebalance, null)
min_elb_capacity = try(each.value.min_elb_capacity, null)
wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, null)
@@ -93,22 +93,23 @@ module "self_managed_node_group" {
metrics_granularity = try(each.value.metrics_granularity, null)
service_linked_role_arn = try(each.value.service_linked_role_arn, null)
- initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, null)
+ initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, [])
instance_refresh = try(each.value.instance_refresh, null)
- use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, null)
+ use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, false)
warm_pool = try(each.value.warm_pool, null)
- create_schedule = try(each.value.create_schedule, null)
+ create_schedule = try(each.value.create_schedule, false)
schedules = try(each.value.schedules, null)
delete_timeout = try(each.value.delete_timeout, null)
# Launch Template
- description = try(each.value.description, null)
+ create_launch_template = try(each.value.create_launch_template, true)
+ description = try(each.value.description, null)
ebs_optimized = try(each.value.ebs_optimized, null)
- image_id = try(each.value.image_id, null)
- instance_type = try(each.value.instance_type, null)
+ image_id = try(each.value.image_id, data.aws_ami.eks_worker[0].image_id)
+ instance_type = try(each.value.instance_type, "m6i.large")
key_name = try(each.value.key_name, null)
user_data = try(each.value.user_data, null)
@@ -121,7 +122,7 @@ module "self_managed_node_group" {
kernel_id = try(each.value.kernel_id, null)
ram_disk_id = try(each.value.ram_disk_id, null)
- block_device_mappings = try(each.value.block_device_mappings, null)
+ block_device_mappings = try(each.value.block_device_mappings, [])
capacity_reservation_specification = try(each.value.capacity_reservation_specification, null)
cpu_options = try(each.value.cpu_options, null)
credit_specification = try(each.value.credit_specification, null)
@@ -135,13 +136,12 @@ module "self_managed_node_group" {
license_specifications = try(each.value.license_specifications, null)
metadata_options = try(each.value.metadata_options, null)
enable_monitoring = try(each.value.enable_monitoring, null)
- network_interfaces = try(each.value.network_interfaces, null)
+ network_interfaces = try(each.value.network_interfaces, [])
placement = try(each.value.placement, null)
- tag_specifications = try(each.value.tag_specifications, null)
+ tag_specifications = try(each.value.tag_specifications, [])
- tags = try(each.value.tags, null)
+ tags = try(each.value.tags, {})
propagate_tags = try(each.value.propagate_tags, [])
- propagate_name = try(each.value.propagate_name, null)
}
################################################################################
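The `try(each.value.<key>, <default>)` calls in the hunk above resolve each per-node-group setting from the user-supplied map, falling back to a fixed default when the key is absent. A minimal sketch of the same pattern, using a hypothetical `groups` map purely for illustration:

locals {
  groups = {
    one = { instance_type = "t3.large" }
    two = {} # no overrides supplied
  }

  resolved = {
    for name, cfg in local.groups : name => {
      instance_type = try(cfg.instance_type, "m6i.large") # same fallback style as the module above
      min_size      = try(cfg.min_size, 0)
    }
  }
}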
From 86758be5af44f2d47b0bf4c7068fee4fdc999e99 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 10 Nov 2021 08:57:04 -0500
Subject: [PATCH 14/83] chore: updating eks managed node group sub-module
---
examples/self_managed_node_groups/main.tf | 26 +-
modules/eks-managed-node-group/README.md | 3 +-
.../eks-managed-node-group/launch_template.tf | 360 ++++++++++++++----
modules/eks-managed-node-group/main.tf | 111 ++----
modules/eks-managed-node-group/outputs.tf | 10 -
5 files changed, 335 insertions(+), 175 deletions(-)
diff --git a/examples/self_managed_node_groups/main.tf b/examples/self_managed_node_groups/main.tf
index f922dd97e5..1708b803d2 100644
--- a/examples/self_managed_node_groups/main.tf
+++ b/examples/self_managed_node_groups/main.tf
@@ -6,6 +6,12 @@ locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -40,6 +46,14 @@ module "eks" {
# additional_tags = {
# ExtraTag = "example"
# }
+ propagate_tags = [
+ for k, v in local.tags :
+ {
+ key = k
+ value = v
+      propagate_at_launch = true
+ }
+ ]
}
example2 = {
min_size = 1
@@ -52,11 +66,7 @@ module "eks" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
################################################################################
@@ -106,9 +116,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
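For reference, the `propagate_tags` for expression above turns the `local.tags` map into the list of tag objects the autoscaling group expects; with this example's tags it evaluates to roughly:

[
  { key = "Example", value = "ex-self-managed-node-groups", propagate_at_launch = true },
  { key = "GithubOrg", value = "terraform-aws-modules", propagate_at_launch = true },
  { key = "GithubRepo", value = "terraform-aws-eks", propagate_at_launch = true },
]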
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index cbfaabc97f..ecb13e934b 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -83,7 +83,7 @@ No modules.
| Name | Type |
|------|------|
-| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
+| [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
@@ -108,6 +108,5 @@ No modules.
| Name | Description |
|------|-------------|
-| [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
diff --git a/modules/eks-managed-node-group/launch_template.tf b/modules/eks-managed-node-group/launch_template.tf
index 6abe358d5a..9cf32abe99 100644
--- a/modules/eks-managed-node-group/launch_template.tf
+++ b/modules/eks-managed-node-group/launch_template.tf
@@ -1,5 +1,5 @@
data "cloudinit_config" "workers_userdata" {
- for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
+ count = var.create && var.create_launch_template ? 1 : 0
gzip = false
base64_encode = true
@@ -12,7 +12,7 @@ data "cloudinit_config" "workers_userdata" {
cluster_name = var.cluster_name
cluster_endpoint = var.cluster_endpoint
cluster_auth_base64 = var.cluster_auth_base64
- ami_id = lookup(each.value, "ami_id", "")
+ ami_id = var.ami_id
ami_is_eks_optimized = each.value["ami_is_eks_optimized"]
bootstrap_env = each.value["bootstrap_env"]
kubelet_extra_args = each.value["kubelet_extra_args"]
@@ -31,116 +31,310 @@ data "cloudinit_config" "workers_userdata" {
# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
# then the default user-data for bootstrapping a cluster is merged in the copy.
resource "aws_launch_template" "workers" {
- for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
+ count = var.create && var.create_launch_template ? 1 : 0
- name_prefix = local.node_groups_names[each.key]
- description = format("EKS Managed Node Group custom LT for %s", local.node_groups_names[each.key])
- update_default_version = lookup(each.value, "update_default_version", true)
+ name = var.launch_template_use_name_prefix ? null : var.launch_template_name
+ name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
+ description = coalesce(var.description, "EKS Managed Node Group custom LT for ${var.name}")
- block_device_mappings {
- device_name = "/dev/xvda"
- ebs {
- volume_size = lookup(each.value, "disk_size", null)
- volume_type = lookup(each.value, "disk_type", null)
- iops = lookup(each.value, "disk_iops", null)
- throughput = lookup(each.value, "disk_throughput", null)
- encrypted = lookup(each.value, "disk_encrypted", null)
- kms_key_id = lookup(each.value, "disk_kms_key_id", null)
- delete_on_termination = true
+ ebs_optimized = var.ebs_optimized
+ image_id = var.image_id
+ instance_type = var.instance_type
+ key_name = var.key_name
+ user_data = var.user_data
+
+ vpc_security_group_ids = var.vpc_security_group_ids
+
+ default_version = var.default_version
+ update_default_version = var.update_default_version
+ disable_api_termination = var.disable_api_termination
+ instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
+ kernel_id = var.kernel_id
+ ram_disk_id = var.ram_disk_id
+
+ dynamic "block_device_mappings" {
+ for_each = var.block_device_mappings
+ content {
+ device_name = block_device_mappings.value.device_name
+ no_device = lookup(block_device_mappings.value, "no_device", null)
+ virtual_name = lookup(block_device_mappings.value, "virtual_name", null)
+
+ dynamic "ebs" {
+ for_each = flatten([lookup(block_device_mappings.value, "ebs", [])])
+ content {
+ delete_on_termination = lookup(ebs.value, "delete_on_termination", null)
+ encrypted = lookup(ebs.value, "encrypted", null)
+ kms_key_id = lookup(ebs.value, "kms_key_id", null)
+ iops = lookup(ebs.value, "iops", null)
+ throughput = lookup(ebs.value, "throughput", null)
+ snapshot_id = lookup(ebs.value, "snapshot_id", null)
+ volume_size = lookup(ebs.value, "volume_size", null)
+ volume_type = lookup(ebs.value, "volume_type", null)
+ }
+ }
}
}
- ebs_optimized = lookup(each.value, "ebs_optimized", !contains(var.ebs_optimized_not_supported, element(each.value.instance_types, 0)))
+ dynamic "capacity_reservation_specification" {
+ for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
+ content {
+ capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)
- instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
+ dynamic "capacity_reservation_target" {
+ for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", [])
+ content {
+ capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null)
+ }
+ }
+ }
+ }
- monitoring {
- enabled = lookup(each.value, "enable_monitoring", null)
+ dynamic "cpu_options" {
+ for_each = var.cpu_options != null ? [var.cpu_options] : []
+ content {
+ core_count = cpu_options.value.core_count
+ threads_per_core = cpu_options.value.threads_per_core
+ }
}
- network_interfaces {
- associate_public_ip_address = lookup(each.value, "public_ip", null)
- delete_on_termination = lookup(each.value, "eni_delete", null)
- security_groups = compact(flatten([
- var.worker_security_group_id,
- var.worker_additional_security_group_ids,
- lookup(
- each.value,
- "additional_security_group_ids",
- null,
- ),
- ]))
+ dynamic "credit_specification" {
+ for_each = var.credit_specification != null ? [var.credit_specification] : []
+ content {
+ cpu_credits = credit_specification.value.cpu_credits
+ }
}
- # if you want to use a custom AMI
- image_id = lookup(each.value, "ami_id", null)
+ dynamic "elastic_gpu_specifications" {
+ for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : []
+ content {
+ type = elastic_gpu_specifications.value.type
+ }
+ }
- # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
- # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
- #
- # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
+ dynamic "elastic_inference_accelerator" {
+ for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : []
+ content {
+ type = elastic_inference_accelerator.value.type
+ }
+ }
- user_data = data.cloudinit_config.workers_userdata[each.key].rendered
+ dynamic "enclave_options" {
+ for_each = var.enclave_options != null ? [var.enclave_options] : []
+ content {
+ enabled = enclave_options.value.enabled
+ }
+ }
- key_name = lookup(each.value, "key_name", null)
+ dynamic "hibernation_options" {
+ for_each = var.hibernation_options != null ? [var.hibernation_options] : []
+ content {
+ configured = hibernation_options.value.configured
+ }
+ }
- metadata_options {
- http_endpoint = lookup(each.value, "metadata_http_endpoint", null)
- http_tokens = lookup(each.value, "metadata_http_tokens", null)
- http_put_response_hop_limit = lookup(each.value, "metadata_http_put_response_hop_limit", null)
+ dynamic "iam_instance_profile" {
+ for_each = var.iam_instance_profile_name != null || var.iam_instance_profile_arn != null ? [1] : []
+ content {
+ name = var.iam_instance_profile_name
+ arn = var.iam_instance_profile_arn
+ }
}
- # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "instance"
+ dynamic "instance_market_options" {
+ for_each = var.instance_market_options != null ? [var.instance_market_options] : []
+ content {
+ market_type = instance_market_options.value.market_type
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
+ dynamic "spot_options" {
+ for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : []
+ content {
+ block_duration_minutes = spot_options.value.block_duration_minutes
+ instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
+ max_price = lookup(spot_options.value, "max_price", null)
+ spot_instance_type = lookup(spot_options.value, "spot_instance_type", null)
+ valid_until = lookup(spot_options.value, "valid_until", null)
+ }
+ }
+ }
}
- # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
- tag_specifications {
- resource_type = "volume"
+ dynamic "license_specification" {
+ for_each = var.license_specifications != null ? [var.license_specifications] : []
+ content {
+ license_configuration_arn = license_specifications.value.license_configuration_arn
+ }
+ }
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
+ dynamic "metadata_options" {
+ for_each = var.metadata_options != null ? [var.metadata_options] : []
+ content {
+ http_endpoint = lookup(metadata_options.value, "http_endpoint", null)
+ http_tokens = lookup(metadata_options.value, "http_tokens", null)
+ http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null)
+ }
}
- # Supplying custom tags to EKS instances ENI's is another use-case for LaunchTemplates
- tag_specifications {
- resource_type = "network-interface"
+ dynamic "monitoring" {
+ for_each = var.enable_monitoring != null ? [1] : []
+ content {
+ enabled = var.enable_monitoring
+ }
+ }
- tags = merge(
- var.tags,
- {
- Name = local.node_groups_names[each.key]
- },
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {})
- )
+ dynamic "network_interfaces" {
+ for_each = var.network_interfaces
+ content {
+ associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null)
+ associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null)
+ delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null)
+ description = lookup(network_interfaces.value, "description", null)
+ device_index = lookup(network_interfaces.value, "device_index", null)
+ ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : []
+ ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null)
+ ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : []
+ ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null)
+ network_interface_id = lookup(network_interfaces.value, "network_interface_id", null)
+ private_ip_address = lookup(network_interfaces.value, "private_ip_address", null)
+ security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? network_interfaces.value.security_groups : []
+ subnet_id = lookup(network_interfaces.value, "subnet_id", null)
+ }
+ }
+
+ dynamic "placement" {
+ for_each = var.placement != null ? [var.placement] : []
+ content {
+ affinity = lookup(placement.value, "affinity", null)
+ availability_zone = lookup(placement.value, "availability_zone", null)
+ group_name = lookup(placement.value, "group_name", null)
+ host_id = lookup(placement.value, "host_id", null)
+ spread_domain = lookup(placement.value, "spread_domain", null)
+ tenancy = lookup(placement.value, "tenancy", null)
+ partition_number = lookup(placement.value, "partition_number", null)
+ }
}
- # Tag the LT itself
- tags = merge(
- var.tags,
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {}),
- )
+ dynamic "tag_specifications" {
+ for_each = toset(["instance", "volume", "network-interface"])
+ content {
+ resource_type = tag_specifications.key
+ tags = merge(var.tags, { Name = var.name })
+ }
+ }
lifecycle {
create_before_destroy = true
}
+
+ tags = var.tags
}
+
+# update_default_version = lookup(each.value, "update_default_version", true)
+
+# block_device_mappings {
+# device_name = "/dev/xvda"
+
+# ebs {
+# volume_size = lookup(each.value, "disk_size", null)
+# volume_type = lookup(each.value, "disk_type", null)
+# iops = lookup(each.value, "disk_iops", null)
+# throughput = lookup(each.value, "disk_throughput", null)
+# encrypted = lookup(each.value, "disk_encrypted", null)
+# kms_key_id = lookup(each.value, "disk_kms_key_id", null)
+# delete_on_termination = true
+# }
+# }
+
+# ebs_optimized = lookup(each.value, "ebs_optimized", !contains(var.ebs_optimized_not_supported, element(each.value.instance_types, 0)))
+
+# instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
+
+# monitoring {
+# enabled = lookup(each.value, "enable_monitoring", null)
+# }
+
+# network_interfaces {
+# associate_public_ip_address = lookup(each.value, "public_ip", null)
+# delete_on_termination = lookup(each.value, "eni_delete", null)
+# security_groups = compact(flatten([
+# var.worker_security_group_id,
+# var.worker_additional_security_group_ids,
+# lookup(
+# each.value,
+# "additional_security_group_ids",
+# null,
+# ),
+# ]))
+# }
+
+# # if you want to use a custom AMI
+# image_id = lookup(each.value, "ami_id", null)
+
+# # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
+# # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
+# #
+# # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
+
+# user_data = data.cloudinit_config.workers_userdata[each.key].rendered
+
+# key_name = lookup(each.value, "key_name", null)
+
+# metadata_options {
+# http_endpoint = lookup(each.value, "metadata_http_endpoint", null)
+# http_tokens = lookup(each.value, "metadata_http_tokens", null)
+# http_put_response_hop_limit = lookup(each.value, "metadata_http_put_response_hop_limit", null)
+# }
+
+# # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
+# tag_specifications {
+# resource_type = "instance"
+
+# tags = merge(
+# var.tags,
+# {
+# Name = local.node_groups_names[each.key]
+# },
+# lookup(var.node_groups_defaults, "additional_tags", {}),
+# lookup(var.node_groups[each.key], "additional_tags", {})
+# )
+# }
+
+# # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
+# tag_specifications {
+# resource_type = "volume"
+
+# tags = merge(
+# var.tags,
+# {
+# Name = local.node_groups_names[each.key]
+# },
+# lookup(var.node_groups_defaults, "additional_tags", {}),
+# lookup(var.node_groups[each.key], "additional_tags", {})
+# )
+# }
+
+# # Supplying custom tags to EKS instances ENI's is another use-case for LaunchTemplates
+# tag_specifications {
+# resource_type = "network-interface"
+
+# tags = merge(
+# var.tags,
+# {
+# Name = local.node_groups_names[each.key]
+# },
+# lookup(var.node_groups_defaults, "additional_tags", {}),
+# lookup(var.node_groups[each.key], "additional_tags", {})
+# )
+# }
+
+# # Tag the LT itself
+# tags = merge(
+# var.tags,
+# lookup(var.node_groups_defaults, "additional_tags", {}),
+# lookup(var.node_groups[each.key], "additional_tags", {}),
+# )
+
+# lifecycle {
+# create_before_destroy = true
+# }
+# }
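The `var.x != null ? [var.x] : []` idiom in the dynamic blocks above renders each optional launch template block only when the corresponding variable is set. A minimal sketch of a caller exercising two of them (module path and the other required inputs are assumed here, not prescribed):

module "eks_managed_node_group" {
  source = "./modules/eks-managed-node-group" # path assumed for illustration

  name                   = "example"
  cluster_name           = "my-cluster"
  create_launch_template = true
  launch_template_name   = "example"

  # non-null, so the metadata_options block is rendered
  metadata_options = {
    http_endpoint               = "enabled"
    http_tokens                 = "required"
    http_put_response_hop_limit = 2
  }

  # one entry, so one block_device_mappings block with a nested ebs block is rendered
  block_device_mappings = [{
    device_name = "/dev/xvda"
    ebs = {
      volume_size = 50
      volume_type = "gp3"
    }
  }]
}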
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 75e6209730..48687ac9dd 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -1,105 +1,76 @@
-resource "aws_eks_node_group" "workers" {
- for_each = local.node_groups_expanded
-
- node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
- node_group_name = lookup(each.value, "name", null)
+resource "aws_eks_node_group" "this" {
+ count = var.create ? 1 : 0
+ # Required
cluster_name = var.cluster_name
- node_role_arn = each.value["iam_role_arn"]
- subnet_ids = each.value["subnets"]
+ node_role_arn = var.iam_role_arn
+ subnet_ids = var.subnet_ids
scaling_config {
- desired_size = each.value["desired_capacity"]
- max_size = each.value["max_capacity"]
- min_size = each.value["min_capacity"]
+ desired_size = var.desired_size
+ max_size = var.max_size
+ min_size = var.min_size
}
- ami_type = lookup(each.value, "ami_type", null)
- disk_size = each.value["launch_template_id"] != null || each.value["create_launch_template"] ? null : lookup(each.value, "disk_size", null)
- instance_types = !each.value["set_instance_types_on_lt"] ? each.value["instance_types"] : null
- release_version = lookup(each.value, "ami_release_version", null)
- capacity_type = lookup(each.value, "capacity_type", null)
- force_update_version = lookup(each.value, "force_update_version", null)
-
- dynamic "remote_access" {
- for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
- ec2_ssh_key = each.value["key_name"]
- source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
- }] : []
+ # Optional
+ node_group_name = var.use_name_prefix ? null : var.name
+ node_group_name_prefix = var.use_name_prefix ? "${var.name}-" : null
- content {
- ec2_ssh_key = remote_access.value["ec2_ssh_key"]
- source_security_group_ids = remote_access.value["source_security_group_ids"]
- }
- }
+ ami_type = var.ami_type
+ release_version = var.ami_release_version
+ capacity_type = var.capacity_type
+ disk_size = var.disk_size
+ force_update_version = var.force_update_version
+ instance_types = var.instance_types
+ labels = var.labels
+  version              = var.cluster_version
dynamic "launch_template" {
- for_each = each.value["launch_template_id"] != null ? [{
- id = each.value["launch_template_id"]
- version = each.value["launch_template_version"]
- }] : []
-
+ for_each = [var.launch_template]
content {
- id = launch_template.value["id"]
- version = launch_template.value["version"]
+ id = lookup(launch_template.value, "id", null)
+ name = lookup(launch_template.value, "name", null)
+ version = lookup(launch_template.value, "version", "$Default")
}
}
- dynamic "launch_template" {
- for_each = each.value["launch_template_id"] == null && each.value["create_launch_template"] ? [{
- id = aws_launch_template.workers[each.key].id
- version = each.value["launch_template_version"] == "$Latest" ? aws_launch_template.workers[each.key].latest_version : (
- each.value["launch_template_version"] == "$Default" ? aws_launch_template.workers[each.key].default_version : each.value["launch_template_version"]
- )
- }] : []
-
+ dynamic "remote_access" {
+ for_each = [var.remote_access]
content {
- id = launch_template.value["id"]
- version = launch_template.value["version"]
+      ec2_ssh_key               = lookup(remote_access.value, "ec2_ssh_key", null)
+      source_security_group_ids = lookup(remote_access.value, "source_security_group_ids", null)
}
}
dynamic "taint" {
- for_each = each.value["taints"]
-
+ for_each = var.taints
content {
- key = taint.value["key"]
- value = taint.value["value"]
- effect = taint.value["effect"]
+ key = taint.value.key
+      value  = lookup(taint.value, "value", null)
+ effect = taint.value.effect
}
}
dynamic "update_config" {
- for_each = try(each.value.update_config.max_unavailable_percentage > 0, each.value.update_config.max_unavailable > 0, false) ? [true] : []
-
+ for_each = [var.update_config]
content {
- max_unavailable_percentage = try(each.value.update_config.max_unavailable_percentage, null)
- max_unavailable = try(each.value.update_config.max_unavailable, null)
+ max_unavailable_percentage = lookup(update_config.value, "max_unavailable_percentage", null)
+ max_unavailable = lookup(update_config.value, "max_unavailable", null)
}
}
timeouts {
- create = lookup(each.value["timeouts"], "create", null)
- update = lookup(each.value["timeouts"], "update", null)
- delete = lookup(each.value["timeouts"], "delete", null)
+ create = lookup(var.timeouts, "create", null)
+ update = lookup(var.timeouts, "update", null)
+ delete = lookup(var.timeouts, "delete", null)
}
- version = lookup(each.value, "version", null)
-
- labels = merge(
- lookup(var.node_groups_defaults, "k8s_labels", {}),
- lookup(var.node_groups[each.key], "k8s_labels", {})
- )
-
- tags = merge(
- var.tags,
- lookup(var.node_groups_defaults, "additional_tags", {}),
- lookup(var.node_groups[each.key], "additional_tags", {}),
- )
-
lifecycle {
create_before_destroy = true
- ignore_changes = [scaling_config[0].desired_size]
+ ignore_changes = [
+ scaling_config[0].desired_size,
+ ]
}
+ tags = var.tags
}
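The `taint` and `update_config` dynamic blocks above iterate object-shaped values; a minimal sketch of inputs that satisfy them (keys and values illustrative, borrowed from the commented example elsewhere in this series):

taints = {
  dedicated = {
    key    = "dedicated"
    value  = "gpuGroup"
    effect = "NO_SCHEDULE"
  }
}

update_config = {
  max_unavailable_percentage = 50 # or set `max_unavailable`
}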
diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf
index ad148ea514..f41699ac21 100644
--- a/modules/eks-managed-node-group/outputs.tf
+++ b/modules/eks-managed-node-group/outputs.tf
@@ -2,13 +2,3 @@ output "node_groups" {
description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
  value       = aws_eks_node_group.this
}
-
-output "aws_auth_roles" {
- description = "Roles for use in aws-auth ConfigMap"
- value = [
- for k, v in local.node_groups_expanded : {
- worker_role_arn = lookup(v, "iam_role_arn", var.default_iam_role_arn)
- platform = "linux"
- }
- ]
-}
From 21731e5074065032d261032c6890618e60aeed76 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 10 Nov 2021 20:26:25 -0500
Subject: [PATCH 15/83] chore: stash work in progress on eks managed node
groups
---
README.md | 2 +
examples/eks_managed_node_group/README.md | 3 +-
.../disk_encryption_policy.tf | 10 +-
examples/eks_managed_node_group/main.tf | 145 ++++---
modules/eks-managed-node-group/README.md | 117 +++++-
.../eks-managed-node-group/launch_template.tf | 340 ----------------
modules/eks-managed-node-group/locals.tf | 51 ---
modules/eks-managed-node-group/main.tf | 349 +++++++++++++++-
modules/eks-managed-node-group/outputs.tf | 8 +-
.../templates/default.sh.tpl | 49 +++
.../templates/userdata.sh.tpl | 26 +-
modules/eks-managed-node-group/variables.tf | 384 ++++++++++++++++--
modules/self-managed-node-group/README.md | 2 +-
modules/self-managed-node-group/variables.tf | 1 +
templates/kubeconfig.tpl | 38 --
variables.tf | 8 +
workers.tf | 101 ++++-
17 files changed, 1036 insertions(+), 598 deletions(-)
delete mode 100644 modules/eks-managed-node-group/launch_template.tf
delete mode 100644 modules/eks-managed-node-group/locals.tf
create mode 100644 modules/eks-managed-node-group/templates/default.sh.tpl
delete mode 100644 templates/kubeconfig.tpl
diff --git a/README.md b/README.md
index 8b716166f3..7515c42119 100644
--- a/README.md
+++ b/README.md
@@ -131,6 +131,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Source | Version |
|------|--------|---------|
+| [eks\_managed\_node\_groups](#module\_eks\_managed\_node\_groups) | ./modules/eks-managed-node-group | n/a |
| [fargate](#module\_fargate) | ./modules/fargate | n/a |
| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
@@ -204,6 +205,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
| [create\_worker\_iam\_role](#input\_create\_worker\_iam\_role) | Determines whether a worker IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_worker\_security\_group](#input\_create\_worker\_security\_group) | Whether to create a security group for the worker nodes | `bool` | `true` | no |
+| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_iam\_role\_path](#input\_fargate\_iam\_role\_path) | Fargate IAM role path | `string` | `null` | no |
| [fargate\_iam\_role\_permissions\_boundary](#input\_fargate\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the Fargate role | `string` | `null` | no |
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index bc1a155e65..bac4c1f93f 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -35,6 +35,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.22.0 |
+| [cloudinit](#provider\_cloudinit) | n/a |
## Modules
@@ -47,11 +48,11 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
-| [aws_iam_service_linked_role.autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource |
| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [cloudinit_config.custom](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
diff --git a/examples/eks_managed_node_group/disk_encryption_policy.tf b/examples/eks_managed_node_group/disk_encryption_policy.tf
index 3f834ad100..e4619ce683 100644
--- a/examples/eks_managed_node_group/disk_encryption_policy.tf
+++ b/examples/eks_managed_node_group/disk_encryption_policy.tf
@@ -1,9 +1,9 @@
 # if you have used ASGs before, that role was auto-created already and you need to import it into TF state
-resource "aws_iam_service_linked_role" "autoscaling" {
- aws_service_name = "autoscaling.amazonaws.com"
- description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
- custom_suffix = "lt_with_managed_node_groups" # the full name is "AWSServiceRoleForAutoScaling_lt_with_managed_node_groups" < 64 characters
-}
+# resource "aws_iam_service_linked_role" "autoscaling" {
+# aws_service_name = "autoscaling.amazonaws.com"
+# description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
+# custom_suffix = "lt_with_managed_node_groups" # the full name is "AWSServiceRoleForAutoScaling_lt_with_managed_node_groups" < 64 characters
+# }
#data "aws_caller_identity" "current" {}
#
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index ca2bb6ffe4..c08515e990 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -6,12 +6,29 @@ locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
# EKS Module
################################################################################
+data "cloudinit_config" "custom" {
+ gzip = false
+ base64_encode = true
+ boundary = "//"
+
+ part {
+ content_type = "text/x-shellscript"
+ content = "echo 'hello world!'"
+ }
+}
+
module "eks" {
source = "../.."
@@ -24,68 +41,80 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- node_groups = {
+ eks_managed_node_groups = {
+ # default_node_group = {}
+ # create_launch_template = {
+ # create_launch_template = true
+ # launch_template_name = "create-launch-template"
+ # user_data = data.cloudinit_config.custom.rendered
+ # }
+ # custom_ami = {
+ # create_launch_template = true
+ # launch_template_name = "custom-ami"
+ # # user_data = data.cloudinit_config.custom_ami.rendered
+
+ # # Current default AMI used by managed node groups - pseudo "custom"
+ # ami_id = "ami-0caf35bc73450c396"
+ # }
    # use already defined launch template
- example1 = {
- name_prefix = "example1"
- desired_capacity = 1
- max_capacity = 15
- min_capacity = 1
+ # example1 = {
+ # name_prefix = "example1"
+ # desired_capacity = 1
+ # max_capacity = 15
+ # min_capacity = 1
+
+ # launch_template_id = aws_launch_template.default.id
+ # launch_template_version = aws_launch_template.default.default_version
- launch_template_id = aws_launch_template.default.id
- launch_template_version = aws_launch_template.default.default_version
+ # instance_types = ["t3.small"]
+
+ # tags = merge(local.tags, {
+ # ExtraTag = "example1"
+ # })
+ # }
- instance_types = ["t3.small"]
- additional_tags = {
- ExtraTag = "example1"
- }
- }
# create launch template
- example2 = {
- create_launch_template = true
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- disk_size = 50
- disk_type = "gp3"
- disk_throughput = 150
- disk_iops = 3000
-
- instance_types = ["t3.large"]
- capacity_type = "SPOT"
-
- bootstrap_env = {
- CONTAINER_RUNTIME = "containerd"
- USE_MAX_PODS = false
- }
- kubelet_extra_args = "--max-pods=110"
- k8s_labels = {
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- additional_tags = {
- ExtraTag = "example2"
- }
- taints = [
- {
- key = "dedicated"
- value = "gpuGroup"
- effect = "NO_SCHEDULE"
- }
- ]
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- }
+ # example2 = {
+ # create_launch_template = true
+ # desired_capacity = 1
+ # max_capacity = 10
+ # min_capacity = 1
+
+ # disk_size = 50
+ # disk_type = "gp3"
+ # disk_throughput = 150
+ # disk_iops = 3000
+
+ # instance_types = ["t3.large"]
+ # capacity_type = "SPOT"
+
+ # bootstrap_env = {
+ # CONTAINER_RUNTIME = "containerd"
+ # USE_MAX_PODS = false
+ # }
+ # kubelet_extra_args = "--max-pods=110"
+ # k8s_labels = {
+ # GithubRepo = "terraform-aws-eks"
+ # GithubOrg = "terraform-aws-modules"
+ # }
+ # additional_tags = {
+ # ExtraTag = "example2"
+ # }
+ # taints = [
+ # {
+ # key = "dedicated"
+ # value = "gpuGroup"
+ # effect = "NO_SCHEDULE"
+ # }
+ # ]
+ # update_config = {
+ # max_unavailable_percentage = 50 # or set `max_unavailable`
+ # }
+ # }
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
################################################################################
@@ -135,9 +164,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index ecb13e934b..13e2abdf6f 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -1,6 +1,35 @@
# EKS Managed Node Group Module
-Helper submodule to create and manage resources related to `eks_node_groups`.
+
+## User Data Configurations
+
+- https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
+> An important note is that user data must be in MIME multi-part archive format,
+> as by default, EKS will merge the bootstrapping command required for nodes to join the
+> cluster with your user data. If you use a custom AMI in your launch template,
+> this merging will __not__ happen and you are responsible for nodes joining the cluster.
+> See [docs for more details](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
+
+- https://aws.amazon.com/blogs/containers/introducing-launch-template-and-custom-ami-support-in-amazon-eks-managed-node-groups/
+
+a. Use the EKS-provided AMI, which merges its user data with the user data users provide in the launch template:
+   i. No additional user data
+   ii. Add additional user data
+b. Use a custom AMI, which MUST bring its own user data that bootstraps the node:
+   i. Bring your own user data (the whole shebang)
+   ii. Use the "default" template provided by this module and (optionally) any additional user data
+
+TODO: try these configurations out in order, then verify and document what actually happens with the user data.
+
+
+## Notes from the launch template
+This is based on the launch template EKS would create if no custom one is specified (`aws ec2 describe-launch-template-versions --launch-template-id xxx`). There are several more options one could set, but you probably don't need to modify them; you can take the default and add your custom AMI and/or custom tags.
+
+Trivia: AWS transparently creates a copy of your launch template and actually uses that copy for the node group. If you DON'T use a custom AMI, then the default user data for bootstrapping a cluster is merged into the copy.
+
+If you use a custom AMI, you need to supply the bootstrap script via user data, as EKS does NOT merge its managed user data in that case. You can add more than the minimum code you see in the template, e.g. install the SSM agent; see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
+
+(Optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
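+
+As a minimal sketch (values illustrative), the MIME multi-part document for case (a.ii) above can be rendered with the `cloudinit_config` data source linked here:
+
+```hcl
+data "cloudinit_config" "this" {
+  gzip          = false
+  base64_encode = true
+  boundary      = "//"
+
+  part {
+    content_type = "text/x-shellscript"
+    content      = "echo 'running additional user data steps'"
+  }
+}
+```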
## Node Groups' IAM Role
@@ -73,7 +102,6 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
## Modules
@@ -84,29 +112,82 @@ No modules.
| Name | Type |
|------|------|
| [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of parent cluster | `string` | `""` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of parent cluster | `string` | `""` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no |
-| [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no |
-| [ebs\_optimized\_not\_supported](#input\_ebs\_optimized\_not\_supported) | List of instance types that do not support EBS optimization | `list(string)` | `[]` | no |
-| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
+| [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance. If not supplied, EKS will use its own default image | `string` | `null` | no |
+| [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version | `string` | `null` | no |
+| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no |
+| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `list(any)` | `[]` | no |
+| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
+| [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `null` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
+| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
+| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
+| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
+| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
+| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
+| [desired\_size](#input\_desired\_size) | Desired number of worker nodes | `number` | `1` | no |
+| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| [disk\_size](#input\_disk\_size) | Disk size in GiB for worker nodes. Defaults to `20` | `number` | `null` | no |
+| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
+| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
+| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
+| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
+| [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no |
+| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group | `string` | `null` | no |
+| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
+| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `null` | no |
+| [instance\_types](#input\_instance\_types) | Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]` | `list(string)` | `null` | no |
+| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
+| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
+| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
+| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
+| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
+| [max\_size](#input\_max\_size) | Maximum number of worker nodes | `number` | `3` | no |
+| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | `null` | no |
+| [min\_size](#input\_min\_size) | Minimum number of worker nodes | `number` | `0` | no |
+| [name](#input\_name) | Name of the EKS Node Group | `string` | `null` | no |
+| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
+| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
+| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
+| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `null` | no |
+| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
+| [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(any)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | `{}` | no |
+| [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `map(string)` | `null` | no |
+| [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no |
+| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `null` | no |
+| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `bool` | `true` | no |
+| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| [user\_data](#input\_user\_data) | The Base64-encoded user data to provide when launching the instance | `string` | `null` | no |
+| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
## Outputs
-| Name | Description |
-|------|-------------|
-| [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
+No outputs.
diff --git a/modules/eks-managed-node-group/launch_template.tf b/modules/eks-managed-node-group/launch_template.tf
deleted file mode 100644
index 9cf32abe99..0000000000
--- a/modules/eks-managed-node-group/launch_template.tf
+++ /dev/null
@@ -1,340 +0,0 @@
-data "cloudinit_config" "workers_userdata" {
- count = var.create && var.create_launch_template ? 1 : 0
-
- gzip = false
- base64_encode = true
- boundary = "//"
-
- part {
- content_type = "text/x-shellscript"
- content = templatefile("${path.module}/templates/userdata.sh.tpl",
- {
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- ami_id = var.ami_id
- ami_is_eks_optimized = each.value["ami_is_eks_optimized"]
- bootstrap_env = each.value["bootstrap_env"]
- kubelet_extra_args = each.value["kubelet_extra_args"]
- pre_userdata = each.value["pre_userdata"]
- capacity_type = lookup(each.value, "capacity_type", "ON_DEMAND")
- append_labels = length(lookup(each.value, "k8s_labels", {})) > 0 ? ",${join(",", [for k, v in lookup(each.value, "k8s_labels", {}) : "${k}=${v}"])}" : ""
- }
- )
- }
-}
-
-# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
-# there are several more options one could set but you probably dont need to modify them
-# you can take the default and add your custom AMI and/or custom tags
-#
-# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
-# then the default user-data for bootstrapping a cluster is merged in the copy.
-resource "aws_launch_template" "workers" {
- count = var.create && var.create_launch_template ? 1 : 0
-
- name = var.launch_template_use_name_prefix ? null : var.launch_template_name
- name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
- description = coalesce(var.description, "EKS Managed Node Group custom LT for ${var.name}")
-
-
- ebs_optimized = var.ebs_optimized
- image_id = var.image_id
- instance_type = var.instance_type
- key_name = var.key_name
- user_data = var.user_data
-
- vpc_security_group_ids = var.vpc_security_group_ids
-
- default_version = var.default_version
- update_default_version = var.update_default_version
- disable_api_termination = var.disable_api_termination
- instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
- kernel_id = var.kernel_id
- ram_disk_id = var.ram_disk_id
-
- dynamic "block_device_mappings" {
- for_each = var.block_device_mappings
- content {
- device_name = block_device_mappings.value.device_name
- no_device = lookup(block_device_mappings.value, "no_device", null)
- virtual_name = lookup(block_device_mappings.value, "virtual_name", null)
-
- dynamic "ebs" {
- for_each = flatten([lookup(block_device_mappings.value, "ebs", [])])
- content {
- delete_on_termination = lookup(ebs.value, "delete_on_termination", null)
- encrypted = lookup(ebs.value, "encrypted", null)
- kms_key_id = lookup(ebs.value, "kms_key_id", null)
- iops = lookup(ebs.value, "iops", null)
- throughput = lookup(ebs.value, "throughput", null)
- snapshot_id = lookup(ebs.value, "snapshot_id", null)
- volume_size = lookup(ebs.value, "volume_size", null)
- volume_type = lookup(ebs.value, "volume_type", null)
- }
- }
- }
- }
-
- dynamic "capacity_reservation_specification" {
- for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
- content {
- capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)
-
- dynamic "capacity_reservation_target" {
- for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", [])
- content {
- capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null)
- }
- }
- }
- }
-
- dynamic "cpu_options" {
- for_each = var.cpu_options != null ? [var.cpu_options] : []
- content {
- core_count = cpu_options.value.core_count
- threads_per_core = cpu_options.value.threads_per_core
- }
- }
-
- dynamic "credit_specification" {
- for_each = var.credit_specification != null ? [var.credit_specification] : []
- content {
- cpu_credits = credit_specification.value.cpu_credits
- }
- }
-
- dynamic "elastic_gpu_specifications" {
- for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : []
- content {
- type = elastic_gpu_specifications.value.type
- }
- }
-
- dynamic "elastic_inference_accelerator" {
- for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : []
- content {
- type = elastic_inference_accelerator.value.type
- }
- }
-
- dynamic "enclave_options" {
- for_each = var.enclave_options != null ? [var.enclave_options] : []
- content {
- enabled = enclave_options.value.enabled
- }
- }
-
- dynamic "hibernation_options" {
- for_each = var.hibernation_options != null ? [var.hibernation_options] : []
- content {
- configured = hibernation_options.value.configured
- }
- }
-
- dynamic "iam_instance_profile" {
- for_each = var.iam_instance_profile_name != null || var.iam_instance_profile_arn != null ? [1] : []
- content {
- name = var.iam_instance_profile_name
- arn = var.iam_instance_profile_arn
- }
- }
-
- dynamic "instance_market_options" {
- for_each = var.instance_market_options != null ? [var.instance_market_options] : []
- content {
- market_type = instance_market_options.value.market_type
-
- dynamic "spot_options" {
- for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : []
- content {
- block_duration_minutes = spot_options.value.block_duration_minutes
- instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
- max_price = lookup(spot_options.value, "max_price", null)
- spot_instance_type = lookup(spot_options.value, "spot_instance_type", null)
- valid_until = lookup(spot_options.value, "valid_until", null)
- }
- }
- }
- }
-
- dynamic "license_specification" {
- for_each = var.license_specifications != null ? [var.license_specifications] : []
- content {
- license_configuration_arn = license_specifications.value.license_configuration_arn
- }
- }
-
- dynamic "metadata_options" {
- for_each = var.metadata_options != null ? [var.metadata_options] : []
- content {
- http_endpoint = lookup(metadata_options.value, "http_endpoint", null)
- http_tokens = lookup(metadata_options.value, "http_tokens", null)
- http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null)
- }
- }
-
- dynamic "monitoring" {
- for_each = var.enable_monitoring != null ? [1] : []
- content {
- enabled = var.enable_monitoring
- }
- }
-
- dynamic "network_interfaces" {
- for_each = var.network_interfaces
- content {
- associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null)
- associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null)
- delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null)
- description = lookup(network_interfaces.value, "description", null)
- device_index = lookup(network_interfaces.value, "device_index", null)
- ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : []
- ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null)
- ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : []
- ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null)
- network_interface_id = lookup(network_interfaces.value, "network_interface_id", null)
- private_ip_address = lookup(network_interfaces.value, "private_ip_address", null)
- security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? network_interfaces.value.security_groups : []
- subnet_id = lookup(network_interfaces.value, "subnet_id", null)
- }
- }
-
- dynamic "placement" {
- for_each = var.placement != null ? [var.placement] : []
- content {
- affinity = lookup(placement.value, "affinity", null)
- availability_zone = lookup(placement.value, "availability_zone", null)
- group_name = lookup(placement.value, "group_name", null)
- host_id = lookup(placement.value, "host_id", null)
- spread_domain = lookup(placement.value, "spread_domain", null)
- tenancy = lookup(placement.value, "tenancy", null)
- partition_number = lookup(placement.value, "partition_number", null)
- }
- }
-
- dynamic "tag_specifications" {
- for_each = toset(["instance", "volume", "network-interface"])
- content {
- resource_type = tag_specifications.key
- tags = merge(var.tags, { Name = var.name })
- }
- }
-
- lifecycle {
- create_before_destroy = true
- }
-
- tags = var.tags
-}
-
-# update_default_version = lookup(each.value, "update_default_version", true)
-
-# block_device_mappings {
-# device_name = "/dev/xvda"
-
-# ebs {
-# volume_size = lookup(each.value, "disk_size", null)
-# volume_type = lookup(each.value, "disk_type", null)
-# iops = lookup(each.value, "disk_iops", null)
-# throughput = lookup(each.value, "disk_throughput", null)
-# encrypted = lookup(each.value, "disk_encrypted", null)
-# kms_key_id = lookup(each.value, "disk_kms_key_id", null)
-# delete_on_termination = true
-# }
-# }
-
-# ebs_optimized = lookup(each.value, "ebs_optimized", !contains(var.ebs_optimized_not_supported, element(each.value.instance_types, 0)))
-
-# instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
-
-# monitoring {
-# enabled = lookup(each.value, "enable_monitoring", null)
-# }
-
-# network_interfaces {
-# associate_public_ip_address = lookup(each.value, "public_ip", null)
-# delete_on_termination = lookup(each.value, "eni_delete", null)
-# security_groups = compact(flatten([
-# var.worker_security_group_id,
-# var.worker_additional_security_group_ids,
-# lookup(
-# each.value,
-# "additional_security_group_ids",
-# null,
-# ),
-# ]))
-# }
-
-# # if you want to use a custom AMI
-# image_id = lookup(each.value, "ami_id", null)
-
-# # If you use a custom AMI, you need to supply the bootstrap script via user data, as EKS doesn't merge its managed user data in that case;
-# # you can add more than the minimum code you see in the template, e.g. install the SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
-# #
-# # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
-
-# user_data = data.cloudinit_config.workers_userdata[each.key].rendered
-
-# key_name = lookup(each.value, "key_name", null)
-
-# metadata_options {
-# http_endpoint = lookup(each.value, "metadata_http_endpoint", null)
-# http_tokens = lookup(each.value, "metadata_http_tokens", null)
-# http_put_response_hop_limit = lookup(each.value, "metadata_http_put_response_hop_limit", null)
-# }
-
-# # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
-# tag_specifications {
-# resource_type = "instance"
-
-# tags = merge(
-# var.tags,
-# {
-# Name = local.node_groups_names[each.key]
-# },
-# lookup(var.node_groups_defaults, "additional_tags", {}),
-# lookup(var.node_groups[each.key], "additional_tags", {})
-# )
-# }
-
-# # Supplying custom tags to EKS instance root volumes is another use-case for LaunchTemplates (this doesn't add tags to volumes dynamically provisioned via PVCs, though)
-# tag_specifications {
-# resource_type = "volume"
-
-# tags = merge(
-# var.tags,
-# {
-# Name = local.node_groups_names[each.key]
-# },
-# lookup(var.node_groups_defaults, "additional_tags", {}),
-# lookup(var.node_groups[each.key], "additional_tags", {})
-# )
-# }
-
-# # Supplying custom tags to EKS instance ENIs is another use-case for LaunchTemplates
-# tag_specifications {
-# resource_type = "network-interface"
-
-# tags = merge(
-# var.tags,
-# {
-# Name = local.node_groups_names[each.key]
-# },
-# lookup(var.node_groups_defaults, "additional_tags", {}),
-# lookup(var.node_groups[each.key], "additional_tags", {})
-# )
-# }
-
-# # Tag the LT itself
-# tags = merge(
-# var.tags,
-# lookup(var.node_groups_defaults, "additional_tags", {}),
-# lookup(var.node_groups[each.key], "additional_tags", {}),
-# )
-
-# lifecycle {
-# create_before_destroy = true
-# }
-# }
diff --git a/modules/eks-managed-node-group/locals.tf b/modules/eks-managed-node-group/locals.tf
deleted file mode 100644
index 0a6c7cbffb..0000000000
--- a/modules/eks-managed-node-group/locals.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-locals {
- # Merge defaults and per-group values to make code cleaner
- node_groups_expanded = { for k, v in var.node_groups : k => merge(
- {
- desired_capacity = var.workers_group_defaults["asg_desired_capacity"]
- iam_role_arn = var.default_iam_role_arn
- instance_types = [var.workers_group_defaults["instance_type"]]
- key_name = var.workers_group_defaults["key_name"]
- launch_template_id = var.workers_group_defaults["launch_template_id"]
- launch_template_version = var.workers_group_defaults["launch_template_version"]
- set_instance_types_on_lt = false
- max_capacity = var.workers_group_defaults["asg_max_size"]
- min_capacity = var.workers_group_defaults["asg_min_size"]
- subnets = var.workers_group_defaults["subnets"]
- create_launch_template = false
- bootstrap_env = {}
- kubelet_extra_args = var.workers_group_defaults["kubelet_extra_args"]
- disk_size = var.workers_group_defaults["root_volume_size"]
- disk_type = var.workers_group_defaults["root_volume_type"]
- disk_iops = var.workers_group_defaults["root_iops"]
- disk_throughput = var.workers_group_defaults["root_volume_throughput"]
- disk_encrypted = var.workers_group_defaults["root_encrypted"]
- disk_kms_key_id = var.workers_group_defaults["root_kms_key_id"]
- enable_monitoring = var.workers_group_defaults["enable_monitoring"]
- eni_delete = var.workers_group_defaults["eni_delete"]
- public_ip = var.workers_group_defaults["public_ip"]
- pre_userdata = var.workers_group_defaults["pre_userdata"]
- additional_security_group_ids = var.workers_group_defaults["additional_security_group_ids"]
- taints = []
- timeouts = var.workers_group_defaults["timeouts"]
- update_default_version = true
- ebs_optimized = null
- metadata_http_endpoint = var.workers_group_defaults["metadata_http_endpoint"]
- metadata_http_tokens = var.workers_group_defaults["metadata_http_tokens"]
- metadata_http_put_response_hop_limit = var.workers_group_defaults["metadata_http_put_response_hop_limit"]
- ami_is_eks_optimized = true
- },
- var.node_groups_defaults,
- v,
- ) if var.create_eks }
-
- node_groups_names = { for k, v in local.node_groups_expanded : k => lookup(
- v,
- "name",
- lookup(
- v,
- "name_prefix",
- join("-", [var.cluster_name, k])
- )
- ) }
-}
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 48687ac9dd..79af0e7649 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -1,15 +1,275 @@
+locals {
+ use_custom_launch_template = var.create_launch_template || var.launch_template_name != null
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+}
+
+data "aws_partition" "current" {}
+
+data "aws_eks_cluster" "this" {
+ count = var.create ? 1 : 0
+
+ name = var.cluster_name
+}
+
+################################################################################
+# User Data
+################################################################################
+
+# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
+# An important note is that user data must be in MIME multi-part archive format,
+# as by default, EKS will merge the bootstrapping command required for nodes to join the
+# cluster with your user data. If you use a custom AMI in your launch template,
+# this merging will NOT happen and you are responsible for nodes joining the cluster.
+# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
+
+# data "cloudinit_config" "custom_ami_user_data" {
+# count = var.create && var.create_launch_template && var.use_custom_ami ? 1 : 0
+
+# gzip = false
+# base64_encode = true
+# boundary = "//"
+
+# part {
+# content_type = "text/x-shellscript"
+# content = templatefile("${path.module}/templates/userdata.sh.tpl",
+# {
+# use_custom_ami = var.use_custom_ami
+# ami_id = var.ami_id
+
+# cluster_name = var.cluster_name
+# cluster_endpoint = var.cluster_endpoint
+# cluster_auth_base64 = var.cluster_auth_base64
+# bootstrap_environment_variables = var.user_data_bootstrap_env_vars
+# kubelet_extra_args = var.kubelet_extra_args
+# user_data_pre_bootstrap = var.user_data_pre_bootstrap
+# user_data_post_bootstrap = var.user_data_post_bootstrap
+# capacity_type = var.capacity_type
+# append_labels = length(var.k8s_labels) > 0 ? ",${join(",", [for k, v in var.k8s_labels : "${k}=${v}"])}" : ""
+# }
+# )
+# }
+# }
+
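A minimal sketch of what the commented-out data source above would render: MIME multi-part user data via the hashicorp/cloudinit provider (>= 2.0). The part content here is illustrative and not part of this patch:

data "cloudinit_config" "custom_ami_user_data_example" {
  gzip          = false
  base64_encode = true
  boundary      = "//"

  part {
    content_type = "text/x-shellscript"
    # Illustrative only - with a custom AMI this part must perform the full
    # bootstrap itself, since EKS will not merge its own bootstrap part in.
    content = <<-EOT
      #!/bin/bash
      set -ex
      echo 'hello world!'
    EOT
  }
}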
+################################################################################
+# Launch template
+################################################################################
+
+resource "aws_launch_template" "this" {
+ count = var.create && var.create_launch_template ? 1 : 0
+
+ name = var.launch_template_use_name_prefix ? null : var.launch_template_name
+ name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
+ description = coalesce(var.description, "EKS Managed Node Group custom LT for ${var.name}")
+
+ ebs_optimized = var.ebs_optimized
+ image_id = var.ami_id
+ # # Set on node group instead
+ # instance_type = var.launch_template_instance_type
+ key_name = var.key_name
+ user_data = var.user_data
+
+ vpc_security_group_ids = var.vpc_security_group_ids
+
+ default_version = var.default_version
+ update_default_version = var.update_default_version
+ disable_api_termination = var.disable_api_termination
+ instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
+ kernel_id = var.kernel_id
+ ram_disk_id = var.ram_disk_id
+
+ dynamic "block_device_mappings" {
+ for_each = var.block_device_mappings
+ content {
+ device_name = block_device_mappings.value.device_name
+ no_device = lookup(block_device_mappings.value, "no_device", null)
+ virtual_name = lookup(block_device_mappings.value, "virtual_name", null)
+
+ dynamic "ebs" {
+ for_each = flatten([lookup(block_device_mappings.value, "ebs", [])])
+ content {
+ delete_on_termination = lookup(ebs.value, "delete_on_termination", null)
+ encrypted = lookup(ebs.value, "encrypted", null)
+ kms_key_id = lookup(ebs.value, "kms_key_id", null)
+ iops = lookup(ebs.value, "iops", null)
+ throughput = lookup(ebs.value, "throughput", null)
+ snapshot_id = lookup(ebs.value, "snapshot_id", null)
+ volume_size = lookup(ebs.value, "volume_size", null)
+ volume_type = lookup(ebs.value, "volume_type", null)
+ }
+ }
+ }
+ }
+
+ dynamic "capacity_reservation_specification" {
+ for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
+ content {
+ capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)
+
+ dynamic "capacity_reservation_target" {
+ for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", [])
+ content {
+ capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null)
+ }
+ }
+ }
+ }
+
+ dynamic "cpu_options" {
+ for_each = var.cpu_options != null ? [var.cpu_options] : []
+ content {
+ core_count = cpu_options.value.core_count
+ threads_per_core = cpu_options.value.threads_per_core
+ }
+ }
+
+ dynamic "credit_specification" {
+ for_each = var.credit_specification != null ? [var.credit_specification] : []
+ content {
+ cpu_credits = credit_specification.value.cpu_credits
+ }
+ }
+
+ dynamic "elastic_gpu_specifications" {
+ for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : []
+ content {
+ type = elastic_gpu_specifications.value.type
+ }
+ }
+
+ dynamic "elastic_inference_accelerator" {
+ for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : []
+ content {
+ type = elastic_inference_accelerator.value.type
+ }
+ }
+
+ dynamic "enclave_options" {
+ for_each = var.enclave_options != null ? [var.enclave_options] : []
+ content {
+ enabled = enclave_options.value.enabled
+ }
+ }
+
+ dynamic "hibernation_options" {
+ for_each = var.hibernation_options != null ? [var.hibernation_options] : []
+ content {
+ configured = hibernation_options.value.configured
+ }
+ }
+
+ # # Set on EKS managed node group, will fail if set here
+ # dynamic "iam_instance_profile" {
+ # for_each = [var.iam_instance_profile]
+ # content {
+ # name = lookup(var.iam_instance_profile, "name", null)
+ # arn = lookup(var.iam_instance_profile, "arn", null)
+ # }
+ # }
+
+ dynamic "instance_market_options" {
+ for_each = var.instance_market_options != null ? [var.instance_market_options] : []
+ content {
+ market_type = instance_market_options.value.market_type
+
+ dynamic "spot_options" {
+ for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : []
+ content {
+ block_duration_minutes = spot_options.value.block_duration_minutes
+ instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
+ max_price = lookup(spot_options.value, "max_price", null)
+ spot_instance_type = lookup(spot_options.value, "spot_instance_type", null)
+ valid_until = lookup(spot_options.value, "valid_until", null)
+ }
+ }
+ }
+ }
+
+ dynamic "license_specification" {
+ for_each = var.license_specifications != null ? [var.license_specifications] : []
+ content {
+ license_configuration_arn = license_specifications.value.license_configuration_arn
+ }
+ }
+
+ dynamic "metadata_options" {
+ for_each = var.metadata_options != null ? [var.metadata_options] : []
+ content {
+ http_endpoint = lookup(metadata_options.value, "http_endpoint", null)
+ http_tokens = lookup(metadata_options.value, "http_tokens", null)
+ http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null)
+ }
+ }
+
+ dynamic "monitoring" {
+ for_each = var.enable_monitoring != null ? [1] : []
+ content {
+ enabled = var.enable_monitoring
+ }
+ }
+
+ dynamic "network_interfaces" {
+ for_each = var.network_interfaces
+ content {
+ associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null)
+ associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null)
+ delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null)
+ description = lookup(network_interfaces.value, "description", null)
+ device_index = lookup(network_interfaces.value, "device_index", null)
+ ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : []
+ ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null)
+ ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : []
+ ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null)
+ network_interface_id = lookup(network_interfaces.value, "network_interface_id", null)
+ private_ip_address = lookup(network_interfaces.value, "private_ip_address", null)
+ security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? network_interfaces.value.security_groups : []
+ subnet_id = lookup(network_interfaces.value, "subnet_id", null)
+ }
+ }
+
+ dynamic "placement" {
+ for_each = var.placement != null ? [var.placement] : []
+ content {
+ affinity = lookup(placement.value, "affinity", null)
+ availability_zone = lookup(placement.value, "availability_zone", null)
+ group_name = lookup(placement.value, "group_name", null)
+ host_id = lookup(placement.value, "host_id", null)
+ spread_domain = lookup(placement.value, "spread_domain", null)
+ tenancy = lookup(placement.value, "tenancy", null)
+ partition_number = lookup(placement.value, "partition_number", null)
+ }
+ }
+
+ dynamic "tag_specifications" {
+ for_each = toset(["instance", "volume", "network-interface"])
+ content {
+ resource_type = tag_specifications.key
+ tags = merge(var.tags, { Name = var.name })
+ }
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ tags = var.tags
+}
+
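For reference, the block_device_mappings and nested ebs lookups above expect input shaped roughly as follows (values are illustrative; `flatten([lookup(..., "ebs", [])])` accepts `ebs` as either a single map or a list of maps):

block_device_mappings = [
  {
    device_name = "/dev/xvda"
    ebs = {
      volume_size           = 75
      volume_type           = "gp3"
      encrypted             = true
      delete_on_termination = true
    }
  }
]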
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
resource "aws_eks_node_group" "this" {
count = var.create ? 1 : 0
# Required
cluster_name = var.cluster_name
- node_role_arn = var.iam_role_arn
+ node_role_arn = var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn
subnet_ids = var.subnet_ids
scaling_config {
- desired_size = var.desired_size
- max_size = var.max_size
min_size = var.min_size
+ max_size = var.max_size
+ desired_size = var.desired_size
}
# Optional
@@ -23,27 +283,27 @@ resource "aws_eks_node_group" "this" {
force_update_version = var.force_update_version
instance_types = var.instance_types
labels = var.labels
- version = var.version
+ version = var.cluster_version
dynamic "launch_template" {
- for_each = [var.launch_template]
+ for_each = local.use_custom_launch_template ? [1] : []
content {
- id = lookup(launch_template.value, "id", null)
- name = lookup(launch_template.value, "name", null)
- version = lookup(launch_template.value, "version", "$Default")
+ name = try(aws_launch_template.this[0].name, var.launch_template_name)
+ # Change order to allow users to set version priority before using defaults
+ version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
}
}
dynamic "remote_access" {
- for_each = [var.remote_access]
+ for_each = var.remote_access != null ? [var.remote_access] : []
content {
- ec2_ssh_key = lookup(remote_access.value, "ec2_ssh_key")
- source_security_group_ids = lookup(remote_access.value, "source_security_group_ids")
+ ec2_ssh_key = lookup(remote_access.value, "ec2_ssh_key", null)
+ source_security_group_ids = lookup(remote_access.value, "source_security_group_ids", [])
}
}
dynamic "taint" {
- for_each = var.taints
+ for_each = var.taints != null ? var.taints : {}
content {
key = taint.value.key
value = lookup(taint.value, "value")
@@ -52,17 +312,20 @@ resource "aws_eks_node_group" "this" {
}
dynamic "update_config" {
- for_each = [var.update_config]
+ for_each = var.update_config != null ? [var.update_config] : []
content {
max_unavailable_percentage = lookup(update_config.value, "max_unavailable_percentage", null)
max_unavailable = lookup(update_config.value, "max_unavailable", null)
}
}
- timeouts {
- create = lookup(var.timeouts, "create", null)
- update = lookup(var.timeouts, "update", null)
- delete = lookup(var.timeouts, "delete", null)
+ dynamic "timeouts" {
+ for_each = [var.timeouts]
+ content {
+ create = lookup(var.timeouts, "create", null)
+ update = lookup(var.timeouts, "update", null)
+ delete = lookup(var.timeouts, "delete", null)
+ }
}
lifecycle {
@@ -72,5 +335,55 @@ resource "aws_eks_node_group" "this" {
]
}
- tags = var.tags
+ tags = merge(
+ var.tags,
+ { Name = var.name }
+ )
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+locals {
+ iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+}
+
+data "aws_iam_policy_document" "assume_role_policy" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
+ statement {
+ sid = "EKSWorkerAssumeRole"
+ actions = ["sts:AssumeRole"]
+
+ principals {
+ type = "Service"
+ identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"]
+ }
+ }
+}
+
+resource "aws_iam_role" "this" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
+ name = var.iam_role_use_name_prefix ? null : local.iam_role_name
+ name_prefix = var.iam_role_use_name_prefix ? try("${local.iam_role_name}-", local.iam_role_name) : null
+ path = var.iam_role_path
+
+ assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
+ permissions_boundary = var.iam_role_permissions_boundary
+ force_detach_policies = true
+
+ tags = merge(var.tags, var.iam_role_tags)
+}
+
+resource "aws_iam_role_policy_attachment" "this" {
+ for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
+ "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy",
+ "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly",
+ var.iam_role_attach_cni_policy ? "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" : "",
+ ], var.iam_role_additional_policies)))) : toset([])
+
+ policy_arn = each.value
+ role = aws_iam_role.this[0].name
}
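A caller-side sketch of the IAM inputs: ARNs passed via `iam_role_additional_policies` are merged with the two required worker policies (and optionally the CNI policy) by the `concat`/`distinct`/`compact` expression above. Values here are illustrative:

module "node_group" {
  source = "./modules/eks-managed-node-group"

  cluster_name = "example"
  subnet_ids   = ["subnet-abcde012", "subnet-bcde012a"]

  iam_role_additional_policies = [
    "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
  ]
}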
diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf
index f41699ac21..ab3b051646 100644
--- a/modules/eks-managed-node-group/outputs.tf
+++ b/modules/eks-managed-node-group/outputs.tf
@@ -1,4 +1,4 @@
-output "node_groups" {
- description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
- value = aws_eks_node_group.workers
-}
+# output "node_groups" {
+# description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
+# value = aws_eks_node_group.this
+# }
diff --git a/modules/eks-managed-node-group/templates/default.sh.tpl b/modules/eks-managed-node-group/templates/default.sh.tpl
new file mode 100644
index 0000000000..5d664e8e5b
--- /dev/null
+++ b/modules/eks-managed-node-group/templates/default.sh.tpl
@@ -0,0 +1,49 @@
+### Default user data
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+#!/bin/bash
+set -ex
+B64_CLUSTER_CA=xxx
+API_SERVER_URL=xxx
+K8S_CLUSTER_DNS_IP=172.20.0.10
+/etc/eks/bootstrap.sh --kubelet-extra-args '--node-labels=eks.amazonaws.com/nodegroup-image=ami-0caf35bc73450c396,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=default_node_group' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL --dns-cluster-ip $K8S_CLUSTER_DNS_IP
+
+--//--
+
+
+### Custom launch template with user added user data
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript
+Mime-Version: 1.0
+
+echo 'hello world!'
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+#!/bin/bash
+set -ex
+B64_CLUSTER_CA=xxx
+API_SERVER_URL=xxx
+K8S_CLUSTER_DNS_IP=172.20.0.10
+/etc/eks/bootstrap.sh --kubelet-extra-args '--node-labels=eks.amazonaws.com/sourceLaunchTemplateVersion=1,eks.amazonaws.com/nodegroup-image=ami-0caf35bc73450c396,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=create_launch_template,eks.amazonaws.com/sourceLaunchTemplateId=lt-003a9022005aa0062' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL --dns-cluster-ip $K8S_CLUSTER_DNS_IP
+
+
+--//--
+
+### Custom AMI - even when using EKS AMI
+Content-Type: multipart/mixed; boundary="//"
+MIME-Version: 1.0
+
+--//
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript
+Mime-Version: 1.0
+
+echo 'hello world!'
+--//--
diff --git a/modules/eks-managed-node-group/templates/userdata.sh.tpl b/modules/eks-managed-node-group/templates/userdata.sh.tpl
index 321c17b427..e9a9e17c37 100644
--- a/modules/eks-managed-node-group/templates/userdata.sh.tpl
+++ b/modules/eks-managed-node-group/templates/userdata.sh.tpl
@@ -1,9 +1,9 @@
#!/bin/bash -e
%{ if length(ami_id) == 0 ~}
-
-# Set bootstrap env
+# Inject custom environment variables and kubelet args into the bootstrap process,
+# for both EKS optimized/managed AMIs and custom EKS AMIs
printf '#!/bin/bash
-%{ for k, v in bootstrap_env ~}
+%{ for k, v in bootstrap_environment_variables ~}
export ${k}="${v}"
%{ endfor ~}
export ADDITIONAL_KUBELET_EXTRA_ARGS="${kubelet_extra_args}"
@@ -16,19 +16,27 @@ sed -i '/^set -o errexit/a\\nsource /etc/profile.d/eks-bootstrap-env.sh' /etc/ek
sed -i 's/^KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-}/KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-} $${ADDITIONAL_KUBELET_EXTRA_ARGS}/' /etc/eks/bootstrap.sh
%{else ~}
-# Set variables for custom AMI
-API_SERVER_URL=${cluster_endpoint}
-B64_CLUSTER_CA=${cluster_auth_base64}
-%{ for k, v in bootstrap_env ~}
+# Set additional bootstrap environment variables for custom AMI
+%{ for k, v in bootstrap_environment_variables ~}
${k}="${v}"
%{ endfor ~}
KUBELET_EXTRA_ARGS='--node-labels=eks.amazonaws.com/nodegroup-image=${ami_id},eks.amazonaws.com/capacityType=${capacity_type}${append_labels} ${kubelet_extra_args}'
%{endif ~}
-# User supplied pre userdata
-${pre_userdata}
+
+
+
%{ if length(ami_id) > 0 && ami_is_eks_optimized ~}
+# Custom AMI bootstrap configurations
+
+${user_data_pre_bootstrap}
+
+# Set required environment variables for custom AMI
+API_SERVER_URL=${cluster_endpoint}
+B64_CLUSTER_CA=${cluster_auth_base64}
# Call bootstrap for EKS optimised custom AMI
/etc/eks/bootstrap.sh ${cluster_name} --apiserver-endpoint "$${API_SERVER_URL}" --b64-cluster-ca "$${B64_CLUSTER_CA}" --kubelet-extra-args "$${KUBELET_EXTRA_ARGS}"
%{ endif ~}
+
+${user_data_post_bootstrap}
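A hedged sketch of rendering the template above with `templatefile()`; every variable the template references must be supplied, and all values below are illustrative. An empty `ami_id` selects the EKS optimized AMI branch:

locals {
  example_user_data = templatefile("${path.module}/templates/userdata.sh.tpl", {
    ami_id                          = ""
    ami_is_eks_optimized            = true
    cluster_name                    = "example"
    cluster_endpoint                = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com"
    cluster_auth_base64             = "LS0tLS1CRUdJTi4uLg==" # placeholder CA data
    capacity_type                   = "ON_DEMAND"
    append_labels                   = ""
    kubelet_extra_args              = "--max-pods=110"
    bootstrap_environment_variables = { CONTAINER_RUNTIME = "containerd" }
    user_data_pre_bootstrap         = "echo 'pre-bootstrap'"
    user_data_post_bootstrap        = "echo 'post-bootstrap'"
  })
}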
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 1aa8cfe26d..3c31086bc4 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -1,71 +1,393 @@
-variable "create_eks" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
+variable "create" {
+ description = "Determines whether to create EKS managed node group or not"
type = bool
default = true
}
-variable "cluster_name" {
- description = "Name of parent cluster"
- type = string
- default = ""
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
}
+################################################################################
+# User Data
+################################################################################
+
variable "cluster_endpoint" {
- description = "Endpoint of parent cluster"
+ description = "Endpoint of associated EKS cluster"
type = string
default = ""
}
variable "cluster_auth_base64" {
- description = "Base64 encoded CA of parent cluster"
+ description = "Base64 encoded CA of associated EKS cluster"
type = string
default = ""
}
-variable "default_iam_role_arn" {
- description = "ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults`"
+################################################################################
+# Launch template
+################################################################################
+
+variable "create_launch_template" {
+ description = "Determines whether to create launch template or not"
+ type = bool
+ default = false
+}
+
+variable "launch_template_name" {
+ description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
type = string
- default = ""
+ default = null
}
-variable "workers_group_defaults" {
- description = "Workers group defaults from parent"
- type = any
- default = {}
+variable "launch_template_use_name_prefix" {
+ description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
+ type = bool
+ default = true
}
-variable "worker_security_group_id" {
- description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
+variable "description" {
+ description = "Description of the launch template"
type = string
- default = ""
+ default = null
}
-variable "worker_additional_security_group_ids" {
- description = "A list of additional security group ids to attach to worker instances"
+variable "ebs_optimized" {
+ description = "If true, the launched EC2 instance will be EBS-optimized"
+ type = bool
+ default = null
+}
+
+variable "ami_id" {
+ description = "The AMI from which to launch the instance. If not supplied, EKS will use its own default image"
+ type = string
+ default = null
+}
+
+variable "key_name" {
+ description = "The key name that should be used for the instance"
+ type = string
+ default = null
+}
+
+variable "user_data" {
+ description = "The Base64-encoded user data to provide when launching the instance"
+ type = string
+ default = null
+}
+
+variable "vpc_security_group_ids" {
+ description = "A list of security group IDs to associate"
type = list(string)
+ default = null
+}
+
+variable "default_version" {
+ description = "Default Version of the launch template"
+ type = string
+ default = null
+}
+
+variable "update_default_version" {
+ description = "Whether to update Default Version each update. Conflicts with `default_version`"
+ type = bool
+ default = true
+}
+
+variable "disable_api_termination" {
+ description = "If true, enables EC2 instance termination protection"
+ type = bool
+ default = null
+}
+
+variable "instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
+ type = string
+ default = null
+}
+
+variable "kernel_id" {
+ description = "The kernel ID"
+ type = string
+ default = null
+}
+
+variable "ram_disk_id" {
+ description = "The ID of the ram disk"
+ type = string
+ default = null
+}
+
+variable "block_device_mappings" {
+ description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
+ type = list(any)
default = []
}
-variable "tags" {
- description = "A map of tags to add to all resources"
+variable "capacity_reservation_specification" {
+ description = "Targeting for EC2 capacity reservations"
+ type = any
+ default = null
+}
+
+variable "cpu_options" {
+ description = "The CPU options for the instance"
type = map(string)
- default = {}
+ default = null
}
-variable "node_groups_defaults" {
- description = "map of maps of node groups to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
- type = any
- default = {}
+variable "credit_specification" {
+ description = "Customize the credit specification of the instance"
+ type = map(string)
+ default = null
+}
+
+variable "elastic_gpu_specifications" {
+ description = "The elastic GPU to attach to the instance"
+ type = map(string)
+ default = null
+}
+
+variable "elastic_inference_accelerator" {
+ description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance"
+ type = map(string)
+ default = null
+}
+
+variable "enclave_options" {
+ description = "Enable Nitro Enclaves on launched instances"
+ type = map(string)
+ default = null
+}
+
+variable "hibernation_options" {
+ description = "The hibernation options for the instance"
+ type = map(string)
+ default = null
}
-variable "node_groups" {
- description = "Map of maps of `eks_node_groups` to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
+variable "instance_market_options" {
+ description = "The market (purchasing) option for the instance"
type = any
+ default = null
+}
+
+variable "license_specifications" {
+ description = "A list of license specifications to associate with"
+ type = map(string)
+ default = null
+}
+
+variable "metadata_options" {
+ description = "Customize the metadata options for the instance"
+ type = map(string)
+ default = null
+}
+
+variable "enable_monitoring" {
+ description = "Enables/disables detailed monitoring"
+ type = bool
+ default = null
+}
+
+variable "network_interfaces" {
+ description = "Customize network interfaces to be attached at instance boot time"
+ type = list(any)
+ default = []
+}
+
+variable "placement" {
+ description = "The placement of the instance"
+ type = map(string)
+ default = null
+}
+
+variable "tag_specifications" {
+ description = "The tags to apply to the resources during launch"
+ type = list(any)
+ default = []
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+variable "cluster_name" {
+ description = "Name of associated EKS cluster"
+ type = string
+ default = null
+}
+
+variable "iam_role_arn" {
+ description = "Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group"
+ type = string
+ default = null
+}
+
+variable "subnet_ids" {
+ description = "Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME`"
+ type = list(string)
+ default = null
+}
+
+variable "min_size" {
+ description = "Minimum number of worker nodes"
+ type = number
+ default = 0
+}
+
+variable "max_size" {
+ description = "Maximum number of worker nodes"
+ type = number
+ default = 3
+}
+
+variable "desired_size" {
+ description = "Desired number of worker nodes"
+ type = number
+ default = 1
+}
+
+variable "name" {
+ description = "Name of the EKS Node Group"
+ type = string
+ default = null
+}
+
+variable "use_name_prefix" {
+ description = "Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix"
+ type = bool
+ default = true
+}
+
+variable "ami_type" {
+ description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64`"
+ type = string
+ default = null
+}
+
+variable "ami_release_version" {
+ description = "AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version"
+ type = string
+ default = null
+}
+
+variable "capacity_type" {
+ description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`"
+ type = string
+ default = null
+}
+
+variable "disk_size" {
+ description = "Disk size in GiB for worker nodes. Defaults to `20`"
+ type = number
+ default = null
+}
+
+variable "force_update_version" {
+ description = "Force version update if existing pods are unable to be drained due to a pod disruption budget issue"
+ type = bool
+ default = null
+}
+
+variable "instance_types" {
+ description = "Set of instance types associated with the EKS Node Group. Defaults to `[\"t3.medium\"]`"
+ type = list(string)
+ default = null
+}
+
+variable "labels" {
+ description = "Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed"
+ type = map(string)
+ default = null
+}
+
+variable "cluster_version" {
+ description = "Kubernetes version. Defaults to EKS Cluster Kubernetes version"
+ type = string
+ default = null
+}
+
+variable "launch_template_version" {
+ description = "Launch template version number. The default is `$Default`"
+ type = string
+ default = null
+}
+
+variable "remote_access" {
+ description = "Configuration block with remote access settings"
+ type = map(string)
+ default = null
+}
+
+variable "taints" {
+ description = "The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group"
+ type = map(string)
+ default = null
+}
+
+variable "update_config" {
+ description = "Configuration block of settings for max unavailable resources during node group updates"
+ type = map(string)
+ default = null
+}
+
+variable "timeouts" {
+ description = "Create, update, and delete timeout configurations for the node group"
+ type = map(string)
default = {}
}
-variable "ebs_optimized_not_supported" {
- description = "List of instance types that do not support EBS optimization"
+################################################################################
+# IAM Role
+################################################################################
+
+variable "create_iam_role" {
+ description = "Determines whether an IAM role is created or to use an existing IAM role"
+ type = bool
+ default = true
+}
+
+variable "iam_role_name" {
+ description = "Name to use on IAM role created"
+ type = string
+ default = null
+}
+
+variable "iam_role_use_name_prefix" {
+ description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "iam_role_path" {
+ description = "IAM role path"
+ type = string
+ default = null
+}
+
+variable "iam_role_permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+ type = string
+ default = null
+}
+
+variable "iam_role_attach_cni_policy" {
+ description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
+ type = bool
+ default = true
+}
+
+variable "iam_role_additional_policies" {
+ description = "Additional policies to be added to the IAM role"
type = list(string)
default = []
}
+
+variable "iam_role_tags" {
+ description = "A map of additional tags to add to the IAM role created"
+ type = map(string)
+ default = {}
+}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 6a076eb6df..423253dbde 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -66,7 +66,7 @@ No modules.
| [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no |
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
-| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | n/a | yes |
+| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 31511d5e48..a5bf2e61b2 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -22,6 +22,7 @@ variable "use_name_prefix" {
variable "launch_template_name" {
description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
type = string
+ default = null
}
variable "launch_template_version" {
diff --git a/templates/kubeconfig.tpl b/templates/kubeconfig.tpl
deleted file mode 100644
index 5004243bec..0000000000
--- a/templates/kubeconfig.tpl
+++ /dev/null
@@ -1,38 +0,0 @@
-apiVersion: v1
-preferences: {}
-kind: Config
-
-clusters:
-- cluster:
- server: ${endpoint}
- certificate-authority-data: ${cluster_auth_base64}
- name: ${kubeconfig_name}
-
-contexts:
-- context:
- cluster: ${kubeconfig_name}
- user: ${kubeconfig_name}
- name: ${kubeconfig_name}
-
-current-context: ${kubeconfig_name}
-
-users:
-- name: ${kubeconfig_name}
- user:
- exec:
- apiVersion: ${aws_authenticator_kubeconfig_apiversion}
- command: ${aws_authenticator_command}
- args:
-%{~ for i in aws_authenticator_command_args }
- - "${i}"
-%{~ endfor ~}
-%{ for i in aws_authenticator_additional_args }
- - ${i}
-%{~ endfor ~}
-%{ if length(aws_authenticator_env_variables) > 0 }
- env:
- %{~ for k, v in aws_authenticator_env_variables ~}
- - name: ${k}
- value: ${v}
- %{~ endfor ~}
-%{ endif }
diff --git a/variables.tf b/variables.tf
index 3ed6dcfa46..8967553e67 100644
--- a/variables.tf
+++ b/variables.tf
@@ -328,7 +328,15 @@ variable "self_managed_node_groups" {
default = {}
}
+################################################################################
+# EKS Managed Node Group
+################################################################################
+variable "eks_managed_node_groups" {
+ description = "Map of EKS managed node group definitions to create"
+ type = any
+ default = {}
+}
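A sketch of what callers might pass here; each key is read with `try()` in `workers.tf` below and becomes the node group name unless `name` is set. Values are illustrative:

eks_managed_node_groups = {
  default = {
    min_size       = 1
    max_size       = 3
    desired_size   = 1
    instance_types = ["t3.medium"]
    capacity_type  = "SPOT"
  }
}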
diff --git a/workers.tf b/workers.tf
index 59cd9c16da..6a97959441 100644
--- a/workers.tf
+++ b/workers.tf
@@ -24,30 +24,87 @@ module "fargate" {
# EKS Managed Node Group
################################################################################
-# module "eks_managed_node_groups" {
-# source = "./modules/eks-managed-node-group"
+module "eks_managed_node_groups" {
+ source = "./modules/eks-managed-node-group"
-# create_eks = var.create_eks
+ for_each = var.create ? var.eks_managed_node_groups : {}
-# cluster_name = local.cluster_name
-# cluster_endpoint = local.cluster_endpoint
-# cluster_auth_base64 = local.cluster_auth_base64
+ cluster_name = aws_eks_cluster.this[0].name
+
+ # EKS Managed Node Group
+ name = try(each.value.name, each.key)
+ use_name_prefix = try(each.value.use_name_prefix, false)
+
+ subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
+
+ min_size = try(each.value.min_size, 1)
+ max_size = try(each.value.max_size, 3)
+ desired_size = try(each.value.desired_size, 1)
-# default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
-# ebs_optimized_not_supported = local.ebs_optimized_not_supported
-# workers_group_defaults = local.workers_group_defaults
-# worker_security_group_id = local.worker_security_group_id
-# worker_additional_security_group_ids = var.worker_additional_security_group_ids
+ ami_id = try(each.value.ami_id, null)
+ ami_type = try(each.value.ami_type, null)
+ ami_release_version = try(each.value.ami_release_version, null)
-# node_groups_defaults = var.node_groups_defaults
-# node_groups = var.node_groups
+ capacity_type = try(each.value.capacity_type, null)
+ disk_size = try(each.value.disk_size, null)
+ force_update_version = try(each.value.force_update_version, null)
+ instance_types = try(each.value.instance_types, null)
+ labels = try(each.value.labels, null)
+ cluster_version = try(each.value.cluster_version, null)
-# tags = var.tags
+ remote_access = try(each.value.remote_access, null)
+ taints = try(each.value.taints, null)
+ update_config = try(each.value.update_config, null)
+ timeouts = try(each.value.timeouts, {})
-# depends_on = [
-# aws_eks_cluster.this,
-# ]
-# }
+ # Launch Template
+ create_launch_template = try(each.value.create_launch_template, false)
+ launch_template_name = try(each.value.launch_template_name, null)
+ launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, true)
+ launch_template_version = try(each.value.launch_template_version, null)
+ description = try(each.value.description, null)
+
+ ebs_optimized = try(each.value.ebs_optimized, null)
+ key_name = try(each.value.key_name, null)
+ user_data = try(each.value.user_data, null)
+
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, null)
+
+ default_version = try(each.value.default_version, null)
+ update_default_version = try(each.value.update_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, null)
+ instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, null)
+ kernel_id = try(each.value.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, null)
+
+ block_device_mappings = try(each.value.block_device_mappings, [])
+ capacity_reservation_specification = try(each.value.capacity_reservation_specification, null)
+ cpu_options = try(each.value.cpu_options, null)
+ credit_specification = try(each.value.credit_specification, null)
+ elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, null)
+ elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, null)
+ enclave_options = try(each.value.enclave_options, null)
+ hibernation_options = try(each.value.hibernation_options, null)
+ instance_market_options = try(each.value.instance_market_options, null)
+ license_specifications = try(each.value.license_specifications, null)
+ metadata_options = try(each.value.metadata_options, null)
+ enable_monitoring = try(each.value.enable_monitoring, null)
+ network_interfaces = try(each.value.network_interfaces, [])
+ placement = try(each.value.placement, null)
+
+ # IAM role
+ create_iam_role = try(each.value.create_iam_role, true)
+ iam_role_arn = try(each.value.iam_role_arn, null)
+ iam_role_name = try(each.value.iam_role_name, null)
+ iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
+ iam_role_path = try(each.value.iam_role_path, null)
+ iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
+ iam_role_tags = try(each.value.iam_role_tags, {})
+ iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, true)
+ iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
+
+ tags = var.tags
+}
################################################################################
# Self Managed Node Group
@@ -58,20 +115,20 @@ module "self_managed_node_group" {
for_each = var.create ? var.self_managed_node_groups : {}
- cluster_name = var.cluster_name
+ cluster_name = aws_eks_cluster.this[0].name
# Autoscaling Group
- name = try(each.value.name, var.cluster_name)
+ name = try(each.value.name, each.key)
use_name_prefix = try(each.value.use_name_prefix, false)
- launch_template_name = try(each.value.launch_template_name, var.cluster_name)
+ launch_template_name = try(each.value.launch_template_name, each.key)
launch_template_version = try(each.value.launch_template_version, null)
availability_zones = try(each.value.availability_zones, null)
subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
min_size = try(each.value.min_size, 0)
max_size = try(each.value.max_size, 0)
- desired_capacity = try(each.value.desired_capacity, 0)
+  desired_capacity        = try(each.value.desired_size, 0) # to be consistent with EKS MNG
capacity_rebalance = try(each.value.capacity_rebalance, null)
min_elb_capacity = try(each.value.min_elb_capacity, null)
wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, null)
From e5cd0f35dfb9a90c1945084e62adfcfc1d7abb8b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Thu, 11 Nov 2021 17:50:57 -0500
Subject: [PATCH 16/83] chore: updating user data
---
examples/eks_managed_node_group/main.tf | 6 +-
modules/eks-managed-node-group/README.md | 18 +++--
modules/eks-managed-node-group/main.tf | 76 ++++++++++---------
.../templates/userdata.sh.tpl | 42 ----------
modules/eks-managed-node-group/variables.tf | 58 ++++++++++++--
.../templates => templates}/default.sh.tpl | 9 ++-
templates/linux_user_data.sh.tpl | 13 ++++
templates/userdata.sh.tpl | 10 ---
...data_windows.tpl => windows_user_data.tpl} | 0
workers.tf | 15 +++-
10 files changed, 142 insertions(+), 105 deletions(-)
delete mode 100644 modules/eks-managed-node-group/templates/userdata.sh.tpl
rename {modules/eks-managed-node-group/templates => templates}/default.sh.tpl (79%)
create mode 100644 templates/linux_user_data.sh.tpl
delete mode 100644 templates/userdata.sh.tpl
rename templates/{userdata_windows.tpl => windows_user_data.tpl} (100%)
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index c08515e990..e9f860568f 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -44,9 +44,9 @@ module "eks" {
eks_managed_node_groups = {
# default_node_group = {}
# create_launch_template = {
- # create_launch_template = true
- # launch_template_name = "create-launch-template"
- # user_data = data.cloudinit_config.custom.rendered
+ # create_launch_template = true
+ # launch_template_name = "create-launch-template"
+ # pre_bootstrap_user_data = "echo 'hello world!'"
# }
# custom_ami = {
# create_launch_template = true
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 13e2abdf6f..bd3428d1bd 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -102,6 +102,7 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
+| [cloudinit](#provider\_cloudinit) | >= 2.0 |
## Modules
@@ -115,9 +116,9 @@ No modules.
| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [cloudinit_config.eks_optimized_ami_user_data](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
@@ -127,10 +128,12 @@ No modules.
| [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version | `string` | `null` | no |
| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no |
| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `list(any)` | `[]` | no |
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
-| [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `null` | no |
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `null` | no |
+| [cluster\_dns\_ip](#input\_cluster\_dns\_ip) | The CIDR block that the EKS cluster provides service IP addresses from | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `null` | no |
| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
@@ -138,6 +141,8 @@ No modules.
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
+| [custom\_ami\_is\_eks\_optimized](#input\_custom\_ami\_is\_eks\_optimized) | Determines whether the custom AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not | `bool` | `true` | no |
+| [custom\_user\_data](#input\_custom\_user\_data) | Base64-encoded user data to use when `custom_ami_is_eks_optimized` = `false` to bootstrap and join instances to the cluster | `string` | `null` | no |
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
| [desired\_size](#input\_desired\_size) | Desired number of worker nodes | `number` | `1` | no |
@@ -163,6 +168,7 @@ No modules.
| [instance\_types](#input\_instance\_types) | Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]` | `list(string)` | `null` | no |
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [kubelet\_extra\_args](#input\_kubelet\_extra\_args) | Additional arguments passed to the kubelet via the bootstrap script's `--kubelet-extra-args` flag | `string` | `""` | no |
| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
@@ -173,7 +179,10 @@ No modules.
| [min\_size](#input\_min\_size) | Minimum number of worker nodes | `number` | `0` | no |
| [name](#input\_name) | Name of the EKS Node Group | `string` | `null` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
+| [node\_labels](#input\_node\_labels) | Key-value map of additional labels | `map(string)` | `{}` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
+| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
+| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `null` | no |
| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
@@ -184,7 +193,6 @@ No modules.
| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `null` | no |
| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `bool` | `true` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
-| [user\_data](#input\_user\_data) | The Base64-encoded user data to provide when launching the instance | `string` | `null` | no |
| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
## Outputs
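Taken together, these inputs split user data into two paths: EKS optimized AMIs get the rendered bootstrap template (optionally wrapped with `pre_bootstrap_user_data`/`post_bootstrap_user_data`), while non-optimized AMIs must bring their own join logic via `custom_user_data`. A minimal sketch of the custom path (the AMI ID and join script are hypothetical):

eks_managed_node_groups = {
  byo_user_data = {
    create_launch_template      = true
    launch_template_name        = "byo-user-data"
    ami_id                      = "ami-0123456789abcdef0" # hypothetical
    custom_ami_is_eks_optimized = false

    # Passed through as-is; joining the node to the cluster is
    # entirely your responsibility in this mode.
    custom_user_data = base64encode(<<-EOT
      #!/bin/bash -e
      /opt/my-ami/join-cluster.sh # placeholder join logic
    EOT
    )
  }
}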
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 79af0e7649..a14bbec07d 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -5,12 +5,6 @@ locals {
data "aws_partition" "current" {}
-data "aws_eks_cluster" "this" {
- count = var.create ? 1 : 0
-
- name = var.cluster_name
-}
-
################################################################################
# User Data
################################################################################
@@ -22,33 +16,45 @@ data "aws_eks_cluster" "this" {
# this merging will NOT happen and you are responsible for nodes joining the cluster.
# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
-# data "cloudinit_config" "custom_ami_user_data" {
-# count = var.create && var.create_launch_template && var.use_custom_ami ? 1 : 0
-
-# gzip = false
-# base64_encode = true
-# boundary = "//"
-
-# part {
-# content_type = "text/x-shellscript"
-# content = templatefile("${path.module}/templates/userdata.sh.tpl",
-# {
-# use_custom_ami = var.use_custom_ami
-# ami_id = var.ami_id
-
-# cluster_name = var.cluster_name
-# cluster_endpoint = var.cluster_endpoint
-# cluster_auth_base64 = var.cluster_auth_base64
-# bootstrap_environment_variables = var.user_data_bootstrap_env_vars
-# kubelet_extra_args = var.kubelet_extra_args
-# user_data_pre_bootstrap = var.user_data_pre_bootstrap
-# user_data_post_bootstrap = var.user_data_post_bootstrap
-# capacity_type = var.capacity_type
-# append_labels = length(var.k8s_labels) > 0 ? ",${join(",", [for k, v in var.k8s_labels : "${k}=${v}"])}" : ""
-# }
-# )
-# }
-# }
+data "cloudinit_config" "eks_optimized_ami_user_data" {
+  count = var.create && ((local.use_custom_launch_template && var.pre_bootstrap_user_data != "") || (var.ami_id != null && var.custom_ami_is_eks_optimized)) ? 1 : 0
+
+ gzip = false
+ base64_encode = true
+ boundary = "//"
+
+ dynamic "part" {
+ for_each = var.pre_bootstrap_user_data != "" ? [1] : []
+ content {
+ content_type = "text/x-shellscript"
+ content = <<-EOT
+ #!/bin/bash -ex
+ ${var.pre_bootstrap_user_data}
+ EOT
+ }
+ }
+
+ dynamic "part" {
+ for_each = var.ami_id != null && var.custom_ami_is_eks_optimized ? [1] : []
+ content {
+ content_type = "text/x-shellscript"
+ content = templatefile("${path.module}/../../templates/linux_user_data.sh.tpl",
+ {
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional
+ cluster_dns_ip = var.cluster_dns_ip
+ bootstrap_extra_args = var.bootstrap_extra_args
+ kubelet_extra_args = var.kubelet_extra_args
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+          }
+ )
+ }
+ }
+}
################################################################################
# Launch template
@@ -66,7 +72,7 @@ resource "aws_launch_template" "this" {
# # Set on node group instead
# instance_type = var.launch_template_instance_type
key_name = var.key_name
- user_data = var.user_data
+ user_data = try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, var.custom_user_data)
vpc_security_group_ids = var.vpc_security_group_ids
@@ -335,6 +341,8 @@ resource "aws_eks_node_group" "this" {
]
}
+ # Note - unless you use a custom launch template, `Name` tags will not propagate down to the
+ # EC2 instances https://github.com/aws/containers-roadmap/issues/781
tags = merge(
var.tags,
{ Name = var.name }
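For the EKS optimized path, the `cloudinit_config` data source above emits a multipart MIME document: an optional shell part carrying `pre_bootstrap_user_data`, followed by the rendered bootstrap template, which EKS then merges with its own user data. A sketch of a node group definition that exercises both parts (the AMI ID and package are illustrative):

module "eks" {
  source = "terraform-aws-modules/eks/aws"
  # ... cluster configuration elided ...

  eks_managed_node_groups = {
    custom = {
      create_launch_template      = true
      launch_template_name        = "custom-ami"
      ami_id                      = "ami-0123456789abcdef0" # hypothetical
      custom_ami_is_eks_optimized = true

      # Rendered as its own shell part ahead of the bootstrap template
      pre_bootstrap_user_data = "yum install -y amazon-ssm-agent"

      kubelet_extra_args = "--max-pods=110"
    }
  }
}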
diff --git a/modules/eks-managed-node-group/templates/userdata.sh.tpl b/modules/eks-managed-node-group/templates/userdata.sh.tpl
deleted file mode 100644
index e9a9e17c37..0000000000
--- a/modules/eks-managed-node-group/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash -e
-%{ if length(ami_id) == 0 ~}
-# Inject custom environment variables and kubelet args into bootstrap process
-# for both EKS optimized/managed AMI or custom EKS AMI
-printf '#!/bin/bash
-%{ for k, v in bootstrap_environment_variables ~}
-export ${k}="${v}"
-%{ endfor ~}
-export ADDITIONAL_KUBELET_EXTRA_ARGS="${kubelet_extra_args}"
-' > /etc/profile.d/eks-bootstrap-env.sh
-
-# Source extra environment variables in bootstrap script
-sed -i '/^set -o errexit/a\\nsource /etc/profile.d/eks-bootstrap-env.sh' /etc/eks/bootstrap.sh
-
-# Merge ADDITIONAL_KUBELET_EXTRA_ARGS into KUBELET_EXTRA_ARGS
-sed -i 's/^KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-}/KUBELET_EXTRA_ARGS="$${KUBELET_EXTRA_ARGS:-} $${ADDITIONAL_KUBELET_EXTRA_ARGS}/' /etc/eks/bootstrap.sh
-%{else ~}
-
-# Set additional boostrap environment variables for custom AMI
-%{ for k, v in bootstrap_environment_variables ~}
-${k}="${v}"
-%{ endfor ~}
-KUBELET_EXTRA_ARGS='--node-labels=eks.amazonaws.com/nodegroup-image=${ami_id},eks.amazonaws.com/capacityType=${capacity_type}${append_labels} ${kubelet_extra_args}'
-%{endif ~}
-
-
-
-
-%{ if length(ami_id) > 0 && ami_is_eks_optimized ~}
-# Custom AMI bootstrap configurations
-
-${user_data_pre_bootstrap}
-
-# Set required environment variables for custom AMI
-API_SERVER_URL=${cluster_endpoint}
-B64_CLUSTER_CA=${cluster_auth_base64}
-
-# Call bootstrap for EKS optimised custom AMI
-/etc/eks/bootstrap.sh ${cluster_name} --apiserver-endpoint "$${API_SERVER_URL}" --b64-cluster-ca "$${B64_CLUSTER_CA}" --kubelet-extra-args "$${KUBELET_EXTRA_ARGS}"
-%{ endif ~}
-
-${user_data_post_bootstrap}
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 3c31086bc4..fe9148bc4b 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -14,18 +14,66 @@ variable "tags" {
# User Data
################################################################################
+variable "custom_user_data" {
+ description = "Base64-encoded user data used; should be used when `custom_ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster"
+ type = string
+ default = null
+}
+
+variable "custom_ami_is_eks_optimized" {
+ description = "Determines whether the custom AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not"
+ type = bool
+ default = true
+}
+
variable "cluster_endpoint" {
description = "Endpoint of associated EKS cluster"
type = string
- default = ""
+ default = null
}
variable "cluster_auth_base64" {
description = "Base64 encoded CA of associated EKS cluster"
type = string
+ default = null
+}
+
+variable "cluster_dns_ip" {
+ description = "The CIDR block that the EKS cluster provides service IP addresses from"
+ type = string
+ default = "" # used in boostrap script conditional check
+}
+
+variable "pre_bootstrap_user_data" {
+ description = "User data that is injected into the user data script ahead of the EKS bootstrap script"
+ type = string
+ default = ""
+}
+
+variable "post_bootstrap_user_data" {
+ description = "User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative"
+ type = string
+ default = ""
+}
+
+variable "bootstrap_extra_args" {
+ description = "Additional arguments passed to the bootstrap script"
+ type = string
+ default = ""
+}
+
+variable "kubelet_extra_args" {
+ description = "Additional arguments passed to the --kubelet flag"
+ type = string
default = ""
}
+variable "node_labels" {
+ description = "Key-value map of additional labels"
+ type = map(string)
+ default = {}
+}
+
################################################################################
# Launch template
################################################################################
@@ -72,12 +120,6 @@ variable "key_name" {
default = null
}
-variable "user_data" {
- description = "The Base64-encoded user data to provide when launching the instance"
- type = string
- default = null
-}
-
variable "vpc_security_group_ids" {
description = "A list of security group IDs to associate"
type = list(string)
@@ -277,7 +319,7 @@ variable "ami_release_version" {
variable "capacity_type" {
description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`"
type = string
- default = null
+ default = "ON_DEMAND"
}
variable "disk_size" {
diff --git a/modules/eks-managed-node-group/templates/default.sh.tpl b/templates/default.sh.tpl
similarity index 79%
rename from modules/eks-managed-node-group/templates/default.sh.tpl
rename to templates/default.sh.tpl
index 5d664e8e5b..f104be3786 100644
--- a/modules/eks-managed-node-group/templates/default.sh.tpl
+++ b/templates/default.sh.tpl
@@ -9,7 +9,14 @@ set -ex
B64_CLUSTER_CA=xxx
API_SERVER_URL=xxx
K8S_CLUSTER_DNS_IP=172.20.0.10
-/etc/eks/bootstrap.sh --kubelet-extra-args '--node-labels=eks.amazonaws.com/nodegroup-image=ami-0caf35bc73450c396,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=default_node_group' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL --dns-cluster-ip $K8S_CLUSTER_DNS_IP
+/etc/eks/bootstrap.sh \
+  --kubelet-extra-args '--node-labels=eks.amazonaws.com/nodegroup-image=ami-0caf35bc73450c396,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=default_node_group' \
+  --b64-cluster-ca $B64_CLUSTER_CA \
+  --apiserver-endpoint $API_SERVER_URL \
+  --dns-cluster-ip $K8S_CLUSTER_DNS_IP
--//--
diff --git a/templates/linux_user_data.sh.tpl b/templates/linux_user_data.sh.tpl
new file mode 100644
index 0000000000..8d8c758121
--- /dev/null
+++ b/templates/linux_user_data.sh.tpl
@@ -0,0 +1,13 @@
+#!/bin/bash -ex
+
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} \
+%{ if length(kubelet_extra_args) > 0 ~}
+ --kubelet-extra-args '${kubelet_extra_args}' \
+%{ endif ~}
+%{ if length(cluster_dns_ip) > 0 ~}
+ --dns-cluster-ip ${cluster_dns_ip} \
+%{ endif ~}
+ --apiserver-endpoint ${cluster_endpoint} \
+ --b64-cluster-ca ${cluster_auth_base64}
+
+${post_bootstrap_user_data}
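A quick way to sanity-check the new template is to render it directly with `templatefile()`; a throwaway output like the one below (all values are placeholders) shows the generated bootstrap call and confirms that an empty `cluster_dns_ip` drops the `--dns-cluster-ip` flag:

output "linux_user_data_rendered" {
  value = templatefile("${path.module}/templates/linux_user_data.sh.tpl", {
    cluster_name             = "example"
    cluster_endpoint         = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
    cluster_auth_base64      = "LS0tLS1CRUdJTi4uLg==" # placeholder
    cluster_dns_ip           = "" # empty -> --dns-cluster-ip is omitted
    bootstrap_extra_args     = ""
    kubelet_extra_args       = "--max-pods=110"
    post_bootstrap_user_data = ""
  })
}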
diff --git a/templates/userdata.sh.tpl b/templates/userdata.sh.tpl
deleted file mode 100644
index cf314b8800..0000000000
--- a/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash -e
-
-# Allow user supplied pre userdata code
-${pre_userdata}
-
-# Bootstrap and join the cluster
-/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
-
-# Allow user supplied userdata code
-${additional_userdata}
diff --git a/templates/userdata_windows.tpl b/templates/windows_user_data.tpl
similarity index 100%
rename from templates/userdata_windows.tpl
rename to templates/windows_user_data.tpl
diff --git a/workers.tf b/workers.tf
index 6a97959441..65fb11a032 100644
--- a/workers.tf
+++ b/workers.tf
@@ -45,7 +45,7 @@ module "eks_managed_node_groups" {
ami_type = try(each.value.ami_type, null)
ami_release_version = try(each.value.ami_release_version, null)
- capacity_type = try(each.value.capacity_type, null)
+ capacity_type = try(each.value.capacity_type, "ON_DEMAND") # used in user data so don't use null
disk_size = try(each.value.disk_size, null)
force_update_version = try(each.value.force_update_version, null)
instance_types = try(each.value.instance_types, null)
@@ -57,6 +57,18 @@ module "eks_managed_node_groups" {
update_config = try(each.value.update_config, null)
timeouts = try(each.value.timeouts, {})
+ # User data
+ custom_user_data = try(each.value.custom_user_data, null)
+ custom_ami_is_eks_optimized = try(each.value.custom_ami_is_eks_optimized, true)
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, null)
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, null)
+ cluster_dns_ip = try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv4_cidr, "")
+ pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, "")
+ post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, "")
+ bootstrap_extra_args = try(each.value.bootstrap_extra_args, "")
+ kubelet_extra_args = try(each.value.kubelet_extra_args, "")
+ node_labels = try(each.value.node_labels, {})
+
# Launch Template
create_launch_template = try(each.value.create_launch_template, false)
launch_template_name = try(each.value.launch_template_name, null)
@@ -66,7 +78,6 @@ module "eks_managed_node_groups" {
ebs_optimized = try(each.value.ebs_optimized, null)
key_name = try(each.value.key_name, null)
- user_data = try(each.value.user_data, null)
vpc_security_group_ids = try(each.value.vpc_security_group_ids, null)
From 27495fc9d5741be5bea8ec843615ef0a0184db56 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Thu, 11 Nov 2021 19:03:31 -0500
Subject: [PATCH 17/83] chore: add IAM role configs to self-managed module,
remove from root
---
README.md | 9 ---
examples/eks_managed_node_group/main.tf | 3 +-
modules/eks-managed-node-group/main.tf | 2 +-
modules/self-managed-node-group/README.md | 11 +++
modules/self-managed-node-group/main.tf | 47 +++++++++++++
modules/self-managed-node-group/variables.tf | 53 ++++++++++++++
variables.tf | 40 -----------
workers.tf | 73 ++++----------------
8 files changed, 127 insertions(+), 111 deletions(-)
diff --git a/README.md b/README.md
index 7515c42119..25f219e0de 100644
--- a/README.md
+++ b/README.md
@@ -141,11 +141,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
|------|------|
| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_iam_instance_profile.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
@@ -163,7 +161,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.worker_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
@@ -203,7 +200,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `false` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
-| [create\_worker\_iam\_role](#input\_create\_worker\_iam\_role) | Determines whether a worker IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_worker\_security\_group](#input\_create\_worker\_security\_group) | Whether to create a security group for the worker nodes | `bool` | `true` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
@@ -223,11 +219,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
| [worker\_egress\_cidrs](#input\_worker\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
-| [worker\_iam\_role\_name](#input\_worker\_iam\_role\_name) | Name to use on worker role created | `string` | `null` | no |
-| [worker\_iam\_role\_path](#input\_worker\_iam\_role\_path) | Worker IAM role path | `string` | `null` | no |
-| [worker\_iam\_role\_permissions\_boundary](#input\_worker\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the worker role | `string` | `null` | no |
-| [worker\_iam\_role\_tags](#input\_worker\_iam\_role\_tags) | A map of additional tags to add to the worker IAM role created | `map(string)` | `{}` | no |
-| [worker\_iam\_role\_use\_name\_prefix](#input\_worker\_iam\_role\_use\_name\_prefix) | Determines whether worker IAM role name (`worker_iam_role_name`) is used as a prefix | `string` | `true` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
| [worker\_security\_group\_name](#input\_worker\_security\_group\_name) | Name to use on worker role created | `string` | `null` | no |
| [worker\_security\_group\_tags](#input\_worker\_security\_group\_tags) | A map of additional tags to add to the worker security group created | `map(string)` | `{}` | no |
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index e9f860568f..3fb04e43b7 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -42,7 +42,7 @@ module "eks" {
cluster_endpoint_public_access = true
eks_managed_node_groups = {
- # default_node_group = {}
+ default_node_group = {}
# create_launch_template = {
# create_launch_template = true
# launch_template_name = "create-launch-template"
@@ -51,7 +51,6 @@ module "eks" {
# custom_ami = {
# create_launch_template = true
# launch_template_name = "custom-ami"
- # # user_data = data.cloudinit_config.custom_ami.rendered
# # Current default AMI used by managed node groups - pseudo "custom"
# ami_id = "ami-0caf35bc73450c396"
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index a14bbec07d..49a4a9c98e 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -375,7 +375,7 @@ resource "aws_iam_role" "this" {
count = var.create && var.create_iam_role ? 1 : 0
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
- name_prefix = var.iam_role_use_name_prefix ? try("${local.iam_role_name}-", local.iam_role_name) : null
+ name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
path = var.iam_role_path
assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 423253dbde..640634d894 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -24,7 +24,10 @@ No modules.
|------|------|
| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
| [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
@@ -37,6 +40,7 @@ No modules.
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster that the node group will be associated with | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
@@ -58,6 +62,13 @@ No modules.
| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | The IAM Instance Profile ARN to launch the instance with | `string` | `null` | no |
| [iam\_instance\_profile\_name](#input\_iam\_instance\_profile\_name) | The name attribute of the IAM instance profile to associate with launched instances | `string` | `null` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `""` | no |
| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 542f0edc2f..85ac3cd454 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -381,3 +381,50 @@ resource "aws_autoscaling_schedule" "this" {
# Cron examples: https://crontab.guru/examples.html
recurrence = lookup(each.value, "recurrence", null)
}
+
+################################################################################
+# IAM Role
+################################################################################
+
+locals {
+ iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+}
+
+data "aws_iam_policy_document" "assume_role_policy" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
+ statement {
+ sid = "EKSWorkerAssumeRole"
+ actions = ["sts:AssumeRole"]
+
+ principals {
+ type = "Service"
+ identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"]
+ }
+ }
+}
+
+resource "aws_iam_role" "this" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
+ name = var.iam_role_use_name_prefix ? null : local.iam_role_name
+ name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+ path = var.iam_role_path
+
+ assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
+ permissions_boundary = var.iam_role_permissions_boundary
+ force_detach_policies = true
+
+ tags = merge(var.tags, var.iam_role_tags)
+}
+
+resource "aws_iam_role_policy_attachment" "this" {
+ for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
+ "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy",
+ "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly",
+ var.iam_role_attach_cni_policy ? "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" : "",
+ ], var.iam_role_additional_policies)))) : toset([])
+
+ policy_arn = each.value
+ role = aws_iam_role.this[0].name
+}
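With the role now owned by the sub-module, a standalone caller only has to opt in and, if needed, bolt on extra managed policies; a minimal sketch (module path and policy ARN are illustrative, other inputs take their defaults):

module "worker_group" {
  source = "./modules/self-managed-node-group" # illustrative path

  name         = "worker-a"
  cluster_name = "my-cluster"

  create_iam_role              = true
  iam_role_additional_policies = [
    "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
  ]
}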
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index a5bf2e61b2..4c2b4a34c0 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -428,3 +428,56 @@ variable "schedules" {
type = map(any)
default = {}
}
+
+
+################################################################################
+# IAM Role
+################################################################################
+
+variable "create_iam_role" {
+ description = "Determines whether an IAM role is created or to use an existing IAM role"
+ type = bool
+ default = true
+}
+
+variable "iam_role_name" {
+ description = "Name to use on IAM role created"
+ type = string
+ default = null
+}
+
+variable "iam_role_use_name_prefix" {
+ description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "iam_role_path" {
+ description = "IAM role path"
+ type = string
+ default = null
+}
+
+variable "iam_role_permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+ type = string
+ default = null
+}
+
+variable "iam_role_attach_cni_policy" {
+ description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
+ type = bool
+ default = true
+}
+
+variable "iam_role_additional_policies" {
+ description = "Additional policies to be added to the IAM role"
+ type = list(string)
+ default = []
+}
+
+variable "iam_role_tags" {
+ description = "A map of additional tags to add to the IAM role created"
+ type = map(string)
+ default = {}
+}
diff --git a/variables.tf b/variables.tf
index 8967553e67..a25306bb1b 100644
--- a/variables.tf
+++ b/variables.tf
@@ -226,46 +226,6 @@ variable "cluster_iam_role_tags" {
default = {}
}
-################################################################################
-# Workers IAM Role
-################################################################################
-
-variable "create_worker_iam_role" {
- description = "Determines whether a worker IAM role is created or to use an existing IAM role"
- type = bool
- default = true
-}
-
-variable "worker_iam_role_name" {
- description = "Name to use on worker role created"
- type = string
- default = null
-}
-
-variable "worker_iam_role_use_name_prefix" {
- description = "Determines whether worker IAM role name (`worker_iam_role_name`) is used as a prefix"
- type = string
- default = true
-}
-
-variable "worker_iam_role_path" {
- description = "Worker IAM role path"
- type = string
- default = null
-}
-
-variable "worker_iam_role_permissions_boundary" {
- description = "ARN of the policy that is used to set the permissions boundary for the worker role"
- type = string
- default = null
-}
-
-variable "worker_iam_role_tags" {
- description = "A map of additional tags to add to the worker IAM role created"
- type = map(string)
- default = {}
-}
-
################################################################################
# Fargate
################################################################################
diff --git a/workers.tf b/workers.tf
index 65fb11a032..addb504f4e 100644
--- a/workers.tf
+++ b/workers.tf
@@ -45,7 +45,7 @@ module "eks_managed_node_groups" {
ami_type = try(each.value.ami_type, null)
ami_release_version = try(each.value.ami_release_version, null)
- capacity_type = try(each.value.capacity_type, "ON_DEMAND") # used in user data so don't use null
+ capacity_type = try(each.value.capacity_type, null)
disk_size = try(each.value.disk_size, null)
force_update_version = try(each.value.force_update_version, null)
instance_types = try(each.value.instance_types, null)
@@ -114,7 +114,7 @@ module "eks_managed_node_groups" {
iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, true)
iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
- tags = var.tags
+ tags = merge(var.tags, try(each.value.tags, {}))
}
################################################################################
@@ -208,64 +208,19 @@ module "self_managed_node_group" {
placement = try(each.value.placement, null)
tag_specifications = try(each.value.tag_specifications, [])
- tags = try(each.value.tags, {})
- propagate_tags = try(each.value.propagate_tags, [])
-}
-
-################################################################################
-# IAM Role & Instance Profile
-################################################################################
-
-locals {
- worker_iam_role_name = coalesce(var.worker_iam_role_name, "${var.cluster_name}-worker")
-}
-
-resource "aws_iam_role" "worker" {
- count = var.create && var.create_worker_iam_role ? 1 : 0
-
- name = var.worker_iam_role_use_name_prefix ? null : local.worker_iam_role_name
- name_prefix = var.worker_iam_role_use_name_prefix ? try("${local.worker_iam_role_name}-", local.worker_iam_role_name) : null
- path = var.worker_iam_role_path
-
- assume_role_policy = data.aws_iam_policy_document.worker_assume_role_policy[0].json
- permissions_boundary = var.worker_iam_role_permissions_boundary
- managed_policy_arns = compact(distinct(concat([
- "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy",
- "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly",
- var.attach_worker_cni_policy ? "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" : "",
- ], var.worker_additional_policies)))
- force_detach_policies = true
-
- tags = merge(var.tags, var.worker_iam_role_tags)
-}
-
-data "aws_iam_policy_document" "worker_assume_role_policy" {
- count = var.create && var.create_worker_iam_role ? 1 : 0
-
- statement {
- sid = "EKSWorkerAssumeRole"
- actions = ["sts:AssumeRole"]
-
- principals {
- type = "Service"
- identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"]
- }
- }
-}
-
-resource "aws_iam_instance_profile" "worker" {
- count = var.create && var.create_worker_iam_role ? 1 : 0
-
- name = var.worker_iam_role_use_name_prefix ? null : local.worker_iam_role_name
- name_prefix = var.worker_iam_role_use_name_prefix ? try("${local.worker_iam_role_name}-", local.worker_iam_role_name) : null
- path = var.worker_iam_role_path
- role = aws_iam_role.worker[0].id
-
- lifecycle {
- create_before_destroy = true
- }
+ # IAM role
+ create_iam_role = try(each.value.create_iam_role, true)
+ iam_role_arn = try(each.value.iam_role_arn, null)
+ iam_role_name = try(each.value.iam_role_name, null)
+ iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
+ iam_role_path = try(each.value.iam_role_path, null)
+ iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
+ iam_role_tags = try(each.value.iam_role_tags, {})
+ iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, true)
+ iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
- tags = merge(var.tags, var.worker_iam_role_tags)
+ tags = merge(var.tags, try(each.value.tags, {}))
+ propagate_tags = try(each.value.propagate_tags, [])
}
################################################################################
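From the root module, the same IAM settings are now tunable per group through the `self_managed_node_groups` map; for example (all values illustrative):

module "eks" {
  # ...

  self_managed_node_groups = {
    default = {
      iam_role_name                = "my-cluster-worker"
      iam_role_use_name_prefix     = false
      iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
      iam_role_tags                = { Team = "platform" }
    }
  }
}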
From 802f53a9a3f358cd8d2acbb04460434309318eee Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Thu, 11 Nov 2021 20:33:36 -0500
Subject: [PATCH 18/83] chore: move worker security group to sub-module
---
README.md | 4 -
main.tf | 13 ++
modules/self-managed-node-group/README.md | 22 ++-
modules/self-managed-node-group/main.tf | 155 +++++++++++++++++--
modules/self-managed-node-group/variables.tf | 69 +++++++--
variables.tf | 30 ----
workers.tf | 6 +-
7 files changed, 238 insertions(+), 61 deletions(-)
diff --git a/README.md b/README.md
index 25f219e0de..ee234eb601 100644
--- a/README.md
+++ b/README.md
@@ -200,7 +200,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `false` | no |
| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
-| [create\_worker\_security\_group](#input\_create\_worker\_security\_group) | Whether to create a security group for the worker nodes | `bool` | `true` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_iam\_role\_path](#input\_fargate\_iam\_role\_path) | Fargate IAM role path | `string` | `null` | no |
@@ -220,9 +219,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
| [worker\_egress\_cidrs](#input\_worker\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
-| [worker\_security\_group\_name](#input\_worker\_security\_group\_name) | Name to use on worker role created | `string` | `null` | no |
-| [worker\_security\_group\_tags](#input\_worker\_security\_group\_tags) | A map of additional tags to add to the worker security group created | `map(string)` | `{}` | no |
-| [worker\_security\_group\_use\_name\_prefix](#input\_worker\_security\_group\_use\_name\_prefix) | Determines whether the worker security group name (`worker_security_group_name`) is used as a prefix | `string` | `true` | no |
| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443) | `number` | `1025` | no |
## Outputs
diff --git a/main.tf b/main.tf
index a456eb4b8f..2554066f02 100644
--- a/main.tf
+++ b/main.tf
@@ -138,6 +138,19 @@ resource "aws_security_group_rule" "cluster_private_access_sg_source" {
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
}
+# TODO
+# resource "aws_security_group_rule" "cluster_primary_ingress_worker" {
+# count = local.create_security_group && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
+
+# description = "Allow pods running on worker to send communication to cluster primary security group (e.g. Fargate pods)."
+# protocol = "all"
+# security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+# source_security_group_id = local.worker_security_group_id
+# from_port = 0
+# to_port = 65535
+# type = "ingress"
+# }
+
################################################################################
# IRSA
################################################################################
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 640634d894..eb0f72547d 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -24,10 +24,20 @@ No modules.
|------|------|
| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
| [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
+| [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster_coredns_tcp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_coredns_udp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_ephemeral_ports_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_https_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_egress_all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
## Inputs
@@ -38,11 +48,13 @@ No modules.
| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster that the node group will be associated with | `string` | `null` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
-| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
+| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
@@ -68,7 +80,7 @@ No modules.
| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `""` | no |
| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
@@ -96,6 +108,11 @@ No modules.
| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
+| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
+| [security\_group\_egress\_cidr\_blocks](#input\_security\_group\_egress\_cidr\_blocks) | List of CIDR blocks that are permitted for security group egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
+| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
+| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
+| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no |
| [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones` | `list(string)` | `null` | no |
| [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `null` | no |
@@ -107,6 +124,7 @@ No modules.
| [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [user\_data](#input\_user\_data) | The Base64-encoded user data to provide when launching the instance. You should use this for Launch Templates instead of user\_data | `string` | `null` | no |
+| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
| [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior. | `string` | `null` | no |
| [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior. | `number` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 85ac3cd454..6a82a07f8f 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -1,3 +1,5 @@
+data "aws_partition" "current" {}
+
################################################################################
# Launch template
################################################################################
@@ -104,12 +106,8 @@ resource "aws_launch_template" "this" {
}
}
- dynamic "iam_instance_profile" {
- for_each = var.iam_instance_profile_name != null || var.iam_instance_profile_arn != null ? [1] : []
- content {
- name = var.iam_instance_profile_name
- arn = var.iam_instance_profile_arn
- }
+ iam_instance_profile {
+    arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn
}
dynamic "instance_market_options" {
@@ -382,16 +380,133 @@ resource "aws_autoscaling_schedule" "this" {
recurrence = lookup(each.value, "recurrence", null)
}
+################################################################################
+# Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
+################################################################################
+
+locals {
+ security_group_name = coalesce(var.security_group_name, "${var.name}-worker")
+ create_security_group = var.create && var.create_security_group
+}
+
+resource "aws_security_group" "this" {
+ count = local.create_security_group ? 1 : 0
+
+ name = var.security_group_use_name_prefix ? null : local.security_group_name
+ name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}-" : null
+ description = var.security_group_description
+ vpc_id = var.vpc_id
+
+ tags = merge(
+ var.tags,
+ {
+ "Name" = local.security_group_name
+ "kubernetes.io/cluster/${var.cluster_name}" = "owned"
+ },
+ var.security_group_tags
+ )
+}
+
+# Ingress
+resource "aws_security_group_rule" "cluster_https_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from cluster control plane on 443/HTTPS"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_kubelet_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from the cluster control plane to kubelet"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 10250
+ to_port = 10250
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_coredns_tcp_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from cluster control plane on 53/TCP for CoreDNS"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 53
+ to_port = 53
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_coredns_udp_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from cluster control plane on 53/UDP for CoreDNS"
+ protocol = "udp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 53
+ to_port = 53
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_ephemeral_ports_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from the cluster control plane on Linux ephemeral ports"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+  from_port                = 1025 # TODO - consider putting back as a variable, likely needed for Windows
+ to_port = 65535
+ type = "ingress"
+}
+
+# TODO - move to separate security group in root that all node groups will get assigned
+resource "aws_security_group_rule" "ingress_self" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow node to communicate with each other"
+ protocol = "-1"
+ security_group_id = aws_security_group.this[0].id
+ self = true
+ from_port = 0
+ to_port = 65535
+ type = "ingress"
+}
+
+# Egress
+resource "aws_security_group_rule" "worker_egress_all" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow egress to all ports/protocols"
+ protocol = "-1"
+ security_group_id = aws_security_group.this[0].id
+ cidr_blocks = var.security_group_egress_cidr_blocks
+ from_port = 0
+ to_port = 65535
+ type = "egress"
+}
+
################################################################################
# IAM Role
################################################################################
locals {
iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+
+ iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
data "aws_iam_policy_document" "assume_role_policy" {
- count = var.create && var.create_iam_role ? 1 : 0
+ count = var.create && var.create_iam_instance_profile ? 1 : 0
statement {
sid = "EKSWorkerAssumeRole"
@@ -405,7 +520,7 @@ data "aws_iam_policy_document" "assume_role_policy" {
}
resource "aws_iam_role" "this" {
- count = var.create && var.create_iam_role ? 1 : 0
+ count = var.create && var.create_iam_instance_profile ? 1 : 0
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
@@ -419,12 +534,28 @@ resource "aws_iam_role" "this" {
}
resource "aws_iam_role_policy_attachment" "this" {
- for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
- "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy",
- "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly",
- var.iam_role_attach_cni_policy ? "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" : "",
+ for_each = var.create && var.create_iam_instance_profile ? toset(compact(distinct(concat([
+ "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy",
+ "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly",
+ var.iam_role_attach_cni_policy ? "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" : "",
], var.iam_role_additional_policies)))) : toset([])
policy_arn = each.value
role = aws_iam_role.this[0].name
}
+
+resource "aws_iam_instance_profile" "this" {
+ count = var.create && var.create_iam_instance_profile ? 1 : 0
+
+ role = aws_iam_role.this[0].name
+
+ name = var.iam_role_use_name_prefix ? null : local.iam_role_name
+ name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+ path = var.iam_role_path
+
+ lifecycle {
+ create_before_destroy = true
+ }
+
+ tags = merge(var.tags, var.iam_role_tags)
+}
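
As a usage sketch of the renamed interface above: the caller now toggles instance-profile creation rather than role creation. The module path, names, and ARN below are assumptions for illustration, not part of the patch.

# Sketch: let the module create the role and instance profile (default)
module "self_managed_group" {
  source = "./modules/self-managed-node-group" # assumed path

  name         = "example"
  cluster_name = "example-eks"

  create_iam_instance_profile = true
}

# Sketch: bring an existing instance profile instead
module "self_managed_group_byo" {
  source = "./modules/self-managed-node-group" # assumed path

  name         = "example-byo"
  cluster_name = "example-eks"

  create_iam_instance_profile = false
  iam_instance_profile_arn    = "arn:aws:iam::111122223333:instance-profile/example" # hypothetical ARN
}
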
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 4c2b4a34c0..f78510e301 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -322,12 +322,6 @@ variable "hibernation_options" {
default = null
}
-variable "iam_instance_profile_arn" {
- description = "The IAM Instance Profile ARN to launch the instance with"
- type = string
- default = null
-}
-
variable "instance_market_options" {
description = "The market (purchasing) option for the instance"
type = any
@@ -429,13 +423,64 @@ variable "schedules" {
default = {}
}
+################################################################################
+# Worker Security Group
+################################################################################
+
+variable "create_security_group" {
+ description = "Whether to create a security group"
+ type = bool
+ default = true
+}
+
+variable "security_group_name" {
+ description = "Name to use on security group created"
+ type = string
+ default = null
+}
+
+variable "security_group_use_name_prefix" {
+ description = "Determines whether the security group name (`security_group_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "security_group_description" {
+ description = "Description for the security group"
+ type = string
+ default = "EKS worker security group"
+}
+
+variable "vpc_id" {
+ description = "ID of the VPC where the security group/nodes will be provisioned"
+ type = string
+ default = null
+}
+
+variable "security_group_egress_cidr_blocks" {
+ description = "List of CIDR blocks that are permitted for security group egress traffic"
+ type = list(string)
+ default = ["0.0.0.0/0"]
+}
+
+variable "cluster_security_group_id" {
+ description = "Cluster control plain security group ID"
+ type = string
+ default = null
+}
+
+variable "security_group_tags" {
+ description = "A map of additional tags to add to the security group created"
+ type = map(string)
+ default = {}
+}
################################################################################
# IAM Role
################################################################################
-variable "create_iam_role" {
- description = "Determines whether an IAM role is created or to use an existing IAM role"
+variable "create_iam_instance_profile" {
+ description = "Determines whether an IAM instance profile is created or to use an existing IAM instance profile"
type = bool
default = true
}
@@ -446,8 +491,14 @@ variable "iam_role_name" {
default = null
}
+variable "iam_instance_profile_arn" {
+ description = "The IAM Instance Profile ARN to launch the instance with"
+ type = string
+ default = null
+}
+
variable "iam_role_use_name_prefix" {
- description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
+ description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the EKS Node Group"
type = string
default = true
}
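
The `*_use_name_prefix` flags above feed the `name`/`name_prefix` pair on the underlying resource; a minimal sketch of that pattern in isolation (resource name and values are hypothetical):

locals {
  sg_name         = "demo-worker" # hypothetical
  use_name_prefix = true
}

resource "aws_security_group" "demo" {
  # Exactly one of name/name_prefix is set; with name_prefix, AWS appends
  # a unique suffix, which avoids collisions on create-before-destroy
  name        = local.use_name_prefix ? null : local.sg_name
  name_prefix = local.use_name_prefix ? "${local.sg_name}-" : null
  vpc_id      = "vpc-1234567890abcdef0" # hypothetical
}
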
diff --git a/variables.tf b/variables.tf
index a25306bb1b..50aba7015e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -142,34 +142,6 @@ variable "cluster_security_group_tags" {
default = {}
}
-################################################################################
-# Worker Security Group
-################################################################################
-
-variable "create_worker_security_group" {
- description = "Whether to create a security group for the worker nodes"
- type = bool
- default = true
-}
-
-variable "worker_security_group_name" {
- description = "Name to use on worker role created"
- type = string
- default = null
-}
-
-variable "worker_security_group_use_name_prefix" {
- description = "Determines whether the worker security group name (`worker_security_group_name`) is used as a prefix"
- type = string
- default = true
-}
-
-variable "worker_security_group_tags" {
- description = "A map of additional tags to add to the worker security group created"
- type = map(string)
- default = {}
-}
-
################################################################################
# IRSA
################################################################################
@@ -298,8 +270,6 @@ variable "eks_managed_node_groups" {
default = {}
}
-
-
variable "worker_security_group_id" {
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster"
type = string
diff --git a/workers.tf b/workers.tf
index addb504f4e..8024fe581c 100644
--- a/workers.tf
+++ b/workers.tf
@@ -198,8 +198,6 @@ module "self_managed_node_group" {
elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, null)
enclave_options = try(each.value.enclave_options, null)
hibernation_options = try(each.value.hibernation_options, null)
- iam_instance_profile_name = try(each.value.iam_instance_profile_name, null)
- iam_instance_profile_arn = try(each.value.iam_instance_profile_arn, null)
instance_market_options = try(each.value.instance_market_options, null)
license_specifications = try(each.value.license_specifications, null)
metadata_options = try(each.value.metadata_options, null)
@@ -209,8 +207,8 @@ module "self_managed_node_group" {
tag_specifications = try(each.value.tag_specifications, [])
# IAM role
- create_iam_role = try(each.value.create_iam_role, true)
- iam_role_arn = try(each.value.iam_role_arn, null)
+ create_iam_instance_profile = try(each.value.create_iam_instance_profile, true)
+ iam_instance_profile_arn = try(each.value.iam_instance_profile_arn, null)
iam_role_name = try(each.value.iam_role_name, null)
iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
iam_role_path = try(each.value.iam_role_path, null)
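
The `try(each.value.x, fallback)` calls above make every key in a group definition an optional override; a sketch with hypothetical group inputs (the root variable name is assumed), e.g. in a `.tfvars` file:

self_managed_node_groups = {
  default = {} # no keys set, so create_iam_instance_profile falls back to true

  custom = {
    # explicit keys win inside try()
    create_iam_instance_profile = false
    iam_instance_profile_arn    = "arn:aws:iam::111122223333:instance-profile/custom" # hypothetical
  }
}
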
From 70cab09b1fe47acf66e752e0ca0f7db70b9e2720 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 09:26:17 -0500
Subject: [PATCH 19/83] chore: clean up root and align EKS and self managed
node group sub-modules
---
README.md | 28 ----
data.tf | 16 ---
locals.tf | 38 -----
main.tf | 41 ++++--
modules/eks-managed-node-group/README.md | 17 ++-
modules/eks-managed-node-group/main.tf | 139 +++++++++++++++++--
modules/eks-managed-node-group/variables.tf | 58 +++++++-
modules/self-managed-node-group/README.md | 4 +-
modules/self-managed-node-group/main.tf | 19 ++-
modules/self-managed-node-group/variables.tf | 10 +-
outputs.tf | 60 --------
variables.tf | 78 -----------
workers.tf | 139 +++----------------
13 files changed, 277 insertions(+), 370 deletions(-)
delete mode 100644 data.tf
delete mode 100644 locals.tf
diff --git a/README.md b/README.md
index ee234eb601..b044b18dea 100644
--- a/README.md
+++ b/README.md
@@ -145,20 +145,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_https_worker_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_primary_ingress_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_ingress_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_ingress_cluster_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_ingress_cluster_kubelet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_ingress_cluster_primary](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
@@ -168,14 +157,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
-| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted | `bool` | `false` | no |
-| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | | `[]` | no |
| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
-| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `null` | no |
-| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `null` | no |
| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
@@ -213,13 +197,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
-| [worker\_additional\_policies](#input\_worker\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
-| [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used | `string` | `""` | no |
-| [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft') | `string` | `"amazon"` | no |
-| [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group | `bool` | `false` | no |
-| [worker\_egress\_cidrs](#input\_worker\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster | `string` | `""` | no |
-| [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443) | `number` | `1025` | no |
## Outputs
@@ -243,9 +220,4 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
| [security\_group\_rule\_cluster\_https\_worker\_ingress](#output\_security\_group\_rule\_cluster\_https\_worker\_ingress) | Security group rule responsible for allowing pods to communicate with the EKS cluster API. |
-| [worker\_iam\_instance\_profile\_arns](#output\_worker\_iam\_instance\_profile\_arns) | default IAM instance profile ARN for EKS worker groups |
-| [worker\_iam\_instance\_profile\_names](#output\_worker\_iam\_instance\_profile\_names) | default IAM instance profile name for EKS worker groups |
-| [worker\_iam\_role\_arn](#output\_worker\_iam\_role\_arn) | default IAM role ARN for EKS worker groups |
-| [worker\_iam\_role\_name](#output\_worker\_iam\_role\_name) | default IAM role name for EKS worker groups |
-| [worker\_security\_group\_id](#output\_worker\_security\_group\_id) | Security group ID attached to the EKS workers. |
diff --git a/data.tf b/data.tf
deleted file mode 100644
index 7995e85b90..0000000000
--- a/data.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-data "aws_partition" "current" {}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_ami" "eks_worker" {
- count = var.create ? 1 : 0
-
- filter {
- name = "name"
- values = [coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*")]
- }
-
- most_recent = true
-
- owners = [var.worker_ami_owner_id]
-}
diff --git a/locals.tf b/locals.tf
deleted file mode 100644
index f8a466b2a3..0000000000
--- a/locals.tf
+++ /dev/null
@@ -1,38 +0,0 @@
-locals {
-
- # EKS Cluster
- cluster_id = try(aws_eks_cluster.this[0].id, "")
- cluster_arn = try(aws_eks_cluster.this[0].arn, "")
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
- cluster_primary_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
-
- cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
-
- # Worker groups
- worker_security_group_id = var.create_worker_security_group ? join("", aws_security_group.worker.*.id) : var.worker_security_group_id
- policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
-
- # launch_template_userdata_rendered = var.create ? [
- # for key, group in var.worker_groups : templatefile(
- # try(
- # group.userdata_template_file,
- # lookup(group, "platform", var.default_platform) == "windows"
- # ? "${path.module}/templates/userdata_windows.tpl"
- # : "${path.module}/templates/userdata.sh.tpl"
- # ),
- # merge({
- # platform = lookup(group, "platform", var.default_platform)
- # cluster_name = var.cluster_name
- # endpoint = local.cluster_endpoint
- # cluster_auth_base64 = local.cluster_auth_base64
- # pre_userdata = lookup(group, "pre_userdata", "")
- # additional_userdata = lookup(group, "additional_userdata", "")
- # bootstrap_extra_args = lookup(group, "bootstrap_extra_args", "")
- # kubelet_extra_args = lookup(group, "kubelet_extra_args", "")
- # },
- # lookup(group, "userdata_template_extra_args", "")
- # )
- # )
- # ] : []
-}
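
The commented-out block deleted above sketched per-group userdata rendering: select a template by platform, then merge common and per-group variables. In isolation the pattern looks roughly like this (paths and values assumed; the extra-args default is a map here so that `merge()` succeeds):

locals {
  group = { platform = "linux" } # hypothetical group definition

  user_data = templatefile(
    # Windows and Linux nodes bootstrap differently, hence two templates
    local.group.platform == "windows"
      ? "${path.module}/templates/userdata_windows.tpl"
      : "${path.module}/templates/userdata.sh.tpl",
    merge(
      {
        cluster_name = "example" # common variables every template receives
      },
      try(local.group.userdata_template_extra_args, {}) # per-group extras
    )
  )
}
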
diff --git a/main.tf b/main.tf
index 2554066f02..352e20b318 100644
--- a/main.tf
+++ b/main.tf
@@ -1,3 +1,20 @@
+locals {
+
+ # EKS Cluster
+ cluster_id = try(aws_eks_cluster.this[0].id, "")
+ cluster_arn = try(aws_eks_cluster.this[0].arn, "")
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+ cluster_primary_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
+
+ cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
+
+ # Worker groups
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+}
+
+data "aws_partition" "current" {}
+
################################################################################
# Cluster
################################################################################
@@ -75,7 +92,7 @@ resource "aws_security_group" "cluster" {
count = local.create_cluster_sg ? 1 : 0
name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
- name_prefix = var.cluster_security_group_use_name_prefix ? try("${local.cluster_sg_name}-", local.cluster_sg_name) : null
+ name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}-" : null
description = "EKS cluster security group"
vpc_id = var.vpc_id
@@ -93,24 +110,24 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
description = "Allow cluster egress access to the Internet"
protocol = "-1"
- security_group_id = local.cluster_security_group_id
+  security_group_id = aws_security_group.cluster[0].id
cidr_blocks = var.cluster_egress_cidrs
from_port = 0
to_port = 0
type = "egress"
}
-resource "aws_security_group_rule" "cluster_https_worker_ingress" {
- count = local.create_cluster_sg && var.create_worker_security_group ? 1 : 0
+# resource "aws_security_group_rule" "cluster_https_worker_ingress" {
+# count = local.create_cluster_sg && var.create_worker_security_group ? 1 : 0
- description = "Allow pods to communicate with the EKS cluster API"
- protocol = "tcp"
- security_group_id = local.cluster_security_group_id
- source_security_group_id = local.worker_security_group_id
- from_port = 443
- to_port = 443
- type = "ingress"
-}
+# description = "Allow pods to communicate with the EKS cluster API"
+# protocol = "tcp"
+# security_group_id = aws_security_group.this[0].id
+#   source_security_group_id = local.worker_security_group_id # TODO - circular dependency between the cluster and worker security groups
+# from_port = 443
+# to_port = 443
+# type = "ingress"
+# }
resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
for_each = local.enable_cluster_private_endpoint_sg_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index bd3428d1bd..64f1266d40 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -116,6 +116,14 @@ No modules.
| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster_coredns_tcp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_coredns_udp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_ephemeral_ports_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_https_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.worker_egress_all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [cloudinit_config.eks_optimized_ami_user_data](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
@@ -135,11 +143,13 @@ No modules.
| [cluster\_dns\_ip](#input\_cluster\_dns\_ip) | The CIDR block that the EKS cluster provides service IP addresses from | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `null` | no |
| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
+| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [custom\_ami\_is\_eks\_optimized](#input\_custom\_ami\_is\_eks\_optimized) | Determines whether the custom AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not | `bool` | `true` | no |
| [custom\_user\_data](#input\_custom\_user\_data) | Base64-encoded user data used; should be used when `custom_ami_is_eks_optimized` = `false` to bootstrap and join instances to the cluster | `string` | `null` | no |
@@ -157,7 +167,6 @@ No modules.
| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group | `string` | `null` | no |
-| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
@@ -185,6 +194,11 @@ No modules.
| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `null` | no |
+| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
+| [security\_group\_egress\_cidr\_blocks](#input\_security\_group\_egress\_cidr\_blocks) | List of CIDR blocks that are permitted for security group egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
+| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
+| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no |
| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
| [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(any)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
@@ -193,6 +207,7 @@ No modules.
| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `null` | no |
| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `bool` | `true` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
## Outputs
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 49a4a9c98e..be29377b78 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -1,6 +1,6 @@
locals {
use_custom_launch_template = var.create_launch_template || var.launch_template_name != null
- policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+
}
data "aws_partition" "current" {}
@@ -65,7 +65,7 @@ resource "aws_launch_template" "this" {
name = var.launch_template_use_name_prefix ? null : var.launch_template_name
name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
- description = coalesce(var.description, "EKS Managed Node Group custom LT for ${var.name}")
+ description = coalesce(var.description, "Custom launch template for ${var.name} EKS managed node group")
ebs_optimized = var.ebs_optimized
image_id = var.ami_id
@@ -264,6 +264,12 @@ resource "aws_launch_template" "this" {
# EKS Managed Node Group
################################################################################
+locals {
+ launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name)
+ # Change order to allow users to set version priority before using defaults
+ launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
+}
+
resource "aws_eks_node_group" "this" {
count = var.create ? 1 : 0
@@ -294,9 +300,8 @@ resource "aws_eks_node_group" "this" {
dynamic "launch_template" {
for_each = local.use_custom_launch_template ? [1] : []
content {
- name = try(aws_launch_template.this[0].name, var.launch_template_name)
- # Change order to allow users to set version priority before using defaults
- version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
+ name = local.launch_template_name
+ version = local.launch_template_version
}
}
@@ -349,19 +354,136 @@ resource "aws_eks_node_group" "this" {
)
}
+
+################################################################################
+# Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
+################################################################################
+
+locals {
+ security_group_name = coalesce(var.security_group_name, "${var.name}-worker")
+ create_security_group = var.create && var.create_security_group
+}
+
+resource "aws_security_group" "this" {
+ count = local.create_security_group ? 1 : 0
+
+ name = var.security_group_use_name_prefix ? null : local.security_group_name
+ name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}-" : null
+ description = var.security_group_description
+ vpc_id = var.vpc_id
+
+ tags = merge(
+ var.tags,
+ {
+ "Name" = local.security_group_name
+ "kubernetes.io/cluster/${var.cluster_name}" = "owned"
+ },
+ var.security_group_tags
+ )
+}
+
+# Ingress
+resource "aws_security_group_rule" "cluster_https_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from cluster control plane on 443/HTTPS"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_kubelet_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from the cluster control plane to kubelet"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 10250
+ to_port = 10250
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_coredns_tcp_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from cluster control plane on 53/TCP for CoreDNS"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 53
+ to_port = 53
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_coredns_udp_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from cluster control plane on 53/UDP for CoreDNS"
+ protocol = "udp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 53
+ to_port = 53
+ type = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_ephemeral_ports_ingress" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow communication from the cluster control plane on Linux ephemeral ports"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = var.cluster_security_group_id
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+}
+
+# TODO - move to a separate security group in the root module that is assigned to all node groups
+resource "aws_security_group_rule" "ingress_self" {
+ count = local.create_security_group ? 1 : 0
+
+  description       = "Allow nodes to communicate with each other"
+ protocol = "-1"
+ security_group_id = aws_security_group.this[0].id
+ self = true
+ from_port = 0
+ to_port = 65535
+ type = "ingress"
+}
+
+# Egress
+resource "aws_security_group_rule" "worker_egress_all" {
+ count = local.create_security_group ? 1 : 0
+
+ description = "Allow egress to all ports/protocols"
+ protocol = "-1"
+ security_group_id = aws_security_group.this[0].id
+ cidr_blocks = var.security_group_egress_cidr_blocks
+ from_port = 0
+ to_port = 65535
+ type = "egress"
+}
+
################################################################################
# IAM Role
################################################################################
locals {
- iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+ iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
data "aws_iam_policy_document" "assume_role_policy" {
count = var.create && var.create_iam_role ? 1 : 0
statement {
- sid = "EKSWorkerAssumeRole"
+ sid = "EKSNodeAssumeRole"
actions = ["sts:AssumeRole"]
principals {
@@ -385,11 +507,12 @@ resource "aws_iam_role" "this" {
tags = merge(var.tags, var.iam_role_tags)
}
+# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
"${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy",
"${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly",
- var.iam_role_attach_cni_policy ? "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" : "",
+ "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy",
], var.iam_role_additional_policies)))) : toset([])
policy_arn = each.value
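
To make the `for_each` set construction above concrete, the nested functions compose as follows (inputs are hypothetical):

locals {
  prefix = "arn:aws:iam::aws:policy"

  # concat merges the base and additional policy lists, distinct drops the
  # duplicate, compact drops the empty string, toset makes it for_each-able
  attach_set = toset(compact(distinct(concat(
    [
      "${local.prefix}/AmazonEKSWorkerNodePolicy",
      "${local.prefix}/AmazonEC2ContainerRegistryReadOnly",
      "${local.prefix}/AmazonEKS_CNI_Policy",
    ],
    # pretend iam_role_additional_policies repeated one entry and included ""
    ["${local.prefix}/AmazonEKSWorkerNodePolicy", ""],
  ))))
  # result: a set of the three unique, non-empty ARNs
}
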
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index fe9148bc4b..a839bd0072 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -382,6 +382,58 @@ variable "timeouts" {
default = {}
}
+################################################################################
+# Security Group
+################################################################################
+
+variable "create_security_group" {
+ description = "Whether to create a security group"
+ type = bool
+ default = true
+}
+
+variable "security_group_name" {
+ description = "Name to use on security group created"
+ type = string
+ default = null
+}
+
+variable "security_group_use_name_prefix" {
+ description = "Determines whether the security group name (`security_group_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "security_group_description" {
+ description = "Description for the security group"
+ type = string
+ default = "EKS worker security group"
+}
+
+variable "vpc_id" {
+ description = "ID of the VPC where the security group/nodes will be provisioned"
+ type = string
+ default = null
+}
+
+variable "security_group_egress_cidr_blocks" {
+ description = "List of CIDR blocks that are permitted for security group egress traffic"
+ type = list(string)
+ default = ["0.0.0.0/0"]
+}
+
+variable "cluster_security_group_id" {
+ description = "Cluster control plain security group ID"
+ type = string
+ default = null
+}
+
+variable "security_group_tags" {
+ description = "A map of additional tags to add to the security group created"
+ type = map(string)
+ default = {}
+}
+
################################################################################
# IAM Role
################################################################################
@@ -416,12 +468,6 @@ variable "iam_role_permissions_boundary" {
default = null
}
-variable "iam_role_attach_cni_policy" {
- description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
- type = bool
- default = true
-}
-
variable "iam_role_additional_policies" {
description = "Additional policies to be added to the IAM role"
type = list(string)
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index eb0f72547d..8bbf2a2f36 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -36,6 +36,7 @@ No modules.
| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.worker_egress_all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
@@ -49,6 +50,7 @@ No modules.
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster that the node group will be associated with | `string` | `null` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to look up the default AMI ID if one is not provided | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
| [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no |
@@ -81,7 +83,7 @@ No modules.
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `""` | no |
+| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `null` | no |
| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 6a82a07f8f..b523645c39 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -1,5 +1,17 @@
data "aws_partition" "current" {}
+data "aws_ami" "eks_default" {
+ count = var.create ? 1 : 0
+
+ filter {
+ name = "name"
+ values = ["amazon-eks-node-${var.cluster_version}-v*"]
+ }
+
+ most_recent = true
+ owners = ["amazon"]
+}
+
################################################################################
# Launch template
################################################################################
@@ -9,7 +21,7 @@ resource "aws_launch_template" "this" {
name = var.launch_template_use_name_prefix ? null : var.launch_template_name
name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
- description = var.description
+ description = coalesce(var.description, "Custom launch template for ${var.name} self managed node group")
ebs_optimized = var.ebs_optimized
image_id = var.image_id
@@ -199,7 +211,7 @@ resource "aws_launch_template" "this" {
}
################################################################################
-# Autoscaling group
+# Self Managed Node Group (Autoscaling Group)
################################################################################
locals {
@@ -509,7 +521,7 @@ data "aws_iam_policy_document" "assume_role_policy" {
count = var.create && var.create_iam_instance_profile ? 1 : 0
statement {
- sid = "EKSWorkerAssumeRole"
+ sid = "EKSNodeAssumeRole"
actions = ["sts:AssumeRole"]
principals {
@@ -544,6 +556,7 @@ resource "aws_iam_role_policy_attachment" "this" {
role = aws_iam_role.this[0].name
}
+# Only the self-managed node group requires an instance profile
resource "aws_iam_instance_profile" "this" {
count = var.create && var.create_iam_instance_profile ? 1 : 0
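
The hunk above adds the `aws_ami.eks_default` lookup but does not show where it is consumed; presumably the launch template falls back to it when `image_id` is left null, roughly along these lines (a sketch under that assumption, not the patch itself):

locals {
  # try() guards the case where neither source yields an AMI ID
  effective_image_id = try(
    coalesce(var.image_id, data.aws_ami.eks_default[0].image_id),
    null,
  )
}
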
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index f78510e301..cf92b55dc5 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -368,7 +368,13 @@ variable "iam_instance_profile_name" {
variable "image_id" {
description = "The AMI from which to launch the instance"
type = string
- default = ""
+ default = null
+}
+
+variable "cluster_version" {
+ description = "Kubernetes cluster version - used to lookup default AMI ID if one is not provided"
+ type = string
+ default = null
}
variable "instance_type" {
@@ -424,7 +430,7 @@ variable "schedules" {
}
################################################################################
-# Worker Security Group
+# Security Group
################################################################################
variable "create_security_group" {
diff --git a/outputs.tf b/outputs.tf
index 68e0974837..512949e810 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -63,66 +63,6 @@ output "oidc_provider_arn" {
value = var.enable_irsa ? concat(aws_iam_openid_connect_provider.oidc_provider[*].arn, [""])[0] : null
}
-# output "workers_asg_arns" {
-# description = "IDs of the autoscaling groups containing workers."
-# value = aws_autoscaling_group.this.*.arn
-# }
-
-# output "workers_asg_names" {
-# description = "Names of the autoscaling groups containing workers."
-# value = aws_autoscaling_group.this.*.id
-# }
-
-# output "workers_default_ami_id" {
-# description = "ID of the default worker group AMI"
-# value = local.default_ami_id_linux
-# }
-
-# output "workers_default_ami_id_windows" {
-# description = "ID of the default Windows worker group AMI"
-# value = local.default_ami_id_windows
-# }
-
-# output "workers_launch_template_ids" {
-# description = "IDs of the worker launch templates."
-# value = aws_launch_template.this.*.id
-# }
-
-# output "workers_launch_template_arns" {
-# description = "ARNs of the worker launch templates."
-# value = aws_launch_template.this.*.arn
-# }
-
-# output "workers_launch_template_latest_versions" {
-# description = "Latest versions of the worker launch templates."
-# value = aws_launch_template.this.*.latest_version
-# }
-
-output "worker_security_group_id" {
- description = "Security group ID attached to the EKS workers."
- value = local.worker_security_group_id
-}
-
-output "worker_iam_instance_profile_arns" {
- description = "default IAM instance profile ARN for EKS worker groups"
- value = aws_iam_instance_profile.worker.*.arn
-}
-
-output "worker_iam_instance_profile_names" {
- description = "default IAM instance profile name for EKS worker groups"
- value = aws_iam_instance_profile.worker.*.name
-}
-
-output "worker_iam_role_name" {
- description = "default IAM role name for EKS worker groups"
- value = try(aws_iam_role.worker[0].name, "")
-}
-
-output "worker_iam_role_arn" {
- description = "default IAM role ARN for EKS worker groups"
- value = try(aws_iam_role.worker[0].arn, "")
-}
-
output "fargate_profile_ids" {
description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)."
value = module.fargate.fargate_profile_ids
diff --git a/variables.tf b/variables.tf
index 50aba7015e..cf2c0b21d6 100644
--- a/variables.tf
+++ b/variables.tf
@@ -269,81 +269,3 @@ variable "eks_managed_node_groups" {
type = any
default = {}
}
-
-variable "worker_security_group_id" {
- description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster"
- type = string
- default = ""
-}
-
-variable "worker_ami_name_filter" {
- description = "Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used"
- type = string
- default = ""
-}
-
-variable "worker_ami_owner_id" {
- description = "The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')"
- type = string
- default = "amazon"
-}
-
-# variable "worker_additional_security_group_ids" {
-# description = "A list of additional security group ids to attach to worker instances"
-# type = list(string)
-# default = []
-# }
-
-variable "worker_sg_ingress_from_port" {
- description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)"
- type = number
- default = 1025
-}
-
-variable "worker_additional_policies" {
- description = "Additional policies to be added to workers"
- type = list(string)
- default = []
-}
-
-variable "worker_create_cluster_primary_security_group_rules" {
- description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group"
- type = bool
- default = false
-}
-
-variable "cluster_create_endpoint_private_access_sg_rule" {
- description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted"
- type = bool
- default = false
-}
-
-variable "cluster_endpoint_private_access_cidrs" {
- description = "List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
- type = list(string)
- default = null
-}
-
-variable "cluster_endpoint_private_access_sg" {
- description = "List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
- type = list(string)
- default = null
-}
-
-variable "attach_worker_cni_policy" {
- description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
- type = bool
- default = true
-}
-
-variable "cluster_egress_cidrs" {
- description = "List of CIDR blocks that are permitted for cluster egress traffic"
- type = list(string)
- default = ["0.0.0.0/0"]
-}
-
-variable "worker_egress_cidrs" {
- description = "List of CIDR blocks that are permitted for workers egress traffic"
- type = list(string)
- default = ["0.0.0.0/0"]
-}
diff --git a/workers.tf b/workers.tf
index 8024fe581c..4ba202d861 100644
--- a/workers.tf
+++ b/workers.tf
@@ -50,7 +50,7 @@ module "eks_managed_node_groups" {
force_update_version = try(each.value.force_update_version, null)
instance_types = try(each.value.instance_types, null)
labels = try(each.value.labels, null)
- cluster_version = try(each.value.cluster_version, null)
+ cluster_version = try(each.value.cluster_version, var.cluster_version)
remote_access = try(each.value.remote_access, null)
taints = try(each.value.taints, null)
@@ -111,7 +111,6 @@ module "eks_managed_node_groups" {
iam_role_path = try(each.value.iam_role_path, null)
iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
iam_role_tags = try(each.value.iam_role_tags, {})
- iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, true)
iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
tags = merge(var.tags, try(each.value.tags, {}))
@@ -175,11 +174,12 @@ module "self_managed_node_group" {
create_launch_template = try(each.value.create_launch_template, true)
description = try(each.value.description, null)
- ebs_optimized = try(each.value.ebs_optimized, null)
- image_id = try(each.value.image_id, data.aws_ami.eks_worker[0].image_id)
- instance_type = try(each.value.instance_type, "m6i.large")
- key_name = try(each.value.key_name, null)
- user_data = try(each.value.user_data, null)
+ ebs_optimized = try(each.value.ebs_optimized, null)
+ image_id = try(each.value.image_id, null)
+ cluster_version = try(each.value.cluster_version, var.cluster_version)
+ instance_type = try(each.value.instance_type, "m6i.large")
+ key_name = try(each.value.key_name, null)
+ user_data = try(each.value.user_data, null)
vpc_security_group_ids = try(each.value.vpc_security_group_ids, null)
@@ -221,113 +221,18 @@ module "self_managed_node_group" {
propagate_tags = try(each.value.propagate_tags, [])
}
-################################################################################
-# Security Group
-################################################################################
-
-locals {
- worker_sg_name = coalesce(var.worker_security_group_name, "${var.cluster_name}-worker")
- create_worker_sg = var.create && var.create_worker_security_group
-}
-
-resource "aws_security_group" "worker" {
- count = local.create_worker_sg ? 1 : 0
-
- name = var.worker_security_group_use_name_prefix ? null : local.worker_sg_name
- name_prefix = var.worker_security_group_use_name_prefix ? try("${local.worker_sg_name}-", local.worker_sg_name) : null
- description = "EKS worker security group"
- vpc_id = var.vpc_id
-
- tags = merge(
- var.tags,
- {
- "Name" = local.worker_sg_name
- "kubernetes.io/cluster/${var.cluster_name}" = "owned"
- },
- var.worker_security_group_tags
- )
-}
-
-resource "aws_security_group_rule" "worker_egress_internet" {
- count = local.create_worker_sg ? 1 : 0
-
- description = "Allow nodes all egress to the Internet."
- protocol = "-1"
- security_group_id = local.worker_security_group_id
- cidr_blocks = var.worker_egress_cidrs
- from_port = 0
- to_port = 0
- type = "egress"
-}
-
-resource "aws_security_group_rule" "worker_ingress_self" {
- count = local.create_worker_sg ? 1 : 0
-
- description = "Allow node to communicate with each other."
- protocol = "-1"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.worker_security_group_id
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "worker_ingress_cluster" {
- count = local.create_worker_sg ? 1 : 0
-
- description = "Allow worker pods to receive communication from the cluster control plane."
- protocol = "tcp"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_security_group_id
- from_port = var.worker_sg_ingress_from_port
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "worker_ingress_cluster_kubelet" {
- count = local.create_worker_sg ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
-
- description = "Allow worker Kubelets to receive communication from the cluster control plane."
- protocol = "tcp"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_security_group_id
- from_port = 10250
- to_port = 10250
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "worker_ingress_cluster_https" {
- count = local.create_worker_sg ? 1 : 0
-
- description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
- protocol = "tcp"
- security_group_id = local.worker_security_group_id
- source_security_group_id = local.cluster_security_group_id
- from_port = 443
- to_port = 443
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "worker_ingress_cluster_primary" {
- count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
-
- description = "Allow pods running on worker to receive communication from cluster primary security group (e.g. Fargate pods)."
- protocol = "all"
- security_group_id = local.worker_security_group_id
- source_security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_primary_ingress_worker" {
- count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
-
- description = "Allow pods running on worker to send communication to cluster primary security group (e.g. Fargate pods)."
- protocol = "all"
- security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
- source_security_group_id = local.worker_security_group_id
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
+# ################################################################################
+# # Security Group
+# ################################################################################
+
+# resource "aws_security_group_rule" "cluster_primary_ingress_worker" {
+# count = local.create_worker_sg && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
+
+# description = "Allow pods running on worker to send communication to cluster primary security group (e.g. Fargate pods)."
+# protocol = "all"
+# security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+# source_security_group_id = local.worker_security_group_id
+# from_port = 0
+# to_port = 65535
+# type = "ingress"
+# }
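
With `workers_group_defaults` removed, per-group settings in the hunks above now fall back through `try()` to module-level variables such as `var.cluster_version`. A minimal caller-side sketch of the resulting precedence, assuming an input map named `self_managed_node_groups` feeds the `self_managed_node_group` module shown above (the map name and registry source are assumptions, not from the patch):

```hcl
# Hypothetical caller configuration; only cluster_version and the try()
# fallback behavior come from the diff above.
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "example"
  cluster_version = "1.21" # module-level fallback consumed by try()

  self_managed_node_groups = {
    default = {}                           # inherits cluster_version = "1.21"
    pinned  = { cluster_version = "1.20" } # per-group value wins over the fallback
  }
}
```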
From 6b9495d7022991127fa95765d8d5f4c363dc4494 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 10:27:59 -0500
Subject: [PATCH 20/83] chore: clean-up and standardize examples
---
.github/CONTRIBUTING.md | 1 -
.github/workflows/stale-actions.yaml | 2 +-
.pre-commit-config.yaml | 5 +-
README.md | 1 -
examples/bottlerocket/README.md | 16 ++---
examples/bottlerocket/main.tf | 57 ++++-----------
examples/bottlerocket/outputs.tf | 5 --
examples/bottlerocket/userdata.toml | 2 +-
examples/bottlerocket/versions.tf | 13 ++--
examples/complete/README.md | 12 +---
examples/complete/main.tf | 71 +++++--------------
examples/complete/outputs.tf | 5 --
examples/complete/versions.tf | 8 +--
examples/eks_managed_node_group/README.md | 12 ++--
examples/eks_managed_node_group/main.tf | 18 -----
.../templates/userdata.sh.tpl | 12 ----
examples/eks_managed_node_group/versions.tf | 12 ++--
examples/fargate/README.md | 11 +--
examples/fargate/main.tf | 53 +++-----------
examples/fargate/versions.tf | 8 +--
examples/instance_refresh/README.md | 18 ++---
examples/instance_refresh/main.tf | 52 ++++----------
examples/instance_refresh/outputs.tf | 10 ---
examples/instance_refresh/versions.tf | 13 ++--
examples/irsa/README.md | 16 ++---
examples/irsa/irsa.tf | 4 +-
examples/irsa/main.tf | 47 +++---------
examples/irsa/versions.tf | 13 ++--
examples/secrets_encryption/README.md | 11 +--
examples/secrets_encryption/main.tf | 48 +++----------
examples/secrets_encryption/versions.tf | 8 +--
examples/self_managed_node_groups/README.md | 2 -
examples/self_managed_node_groups/main.tf | 18 -----
examples/self_managed_node_groups/outputs.tf | 5 --
main.tf | 8 ---
modules/eks-managed-node-group/README.md | 4 +-
modules/eks-managed-node-group/versions.tf | 10 ++-
modules/self-managed-node-group/README.md | 4 +-
modules/self-managed-node-group/versions.tf | 2 +-
outputs.tf | 18 ++---
versions.tf | 4 --
41 files changed, 167 insertions(+), 472 deletions(-)
delete mode 100644 examples/eks_managed_node_group/templates/userdata.sh.tpl
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 65df33743e..53127506fa 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -31,4 +31,3 @@ To generate changelog, Pull Requests or Commits must have semantic and must foll
- `chore:` for chores stuff
The `chore` prefix skipped during changelog generation. It can be used for `chore: update changelog` commit message by example.
-
diff --git a/.github/workflows/stale-actions.yaml b/.github/workflows/stale-actions.yaml
index 93c90dfcd1..d2a5f6b96e 100644
--- a/.github/workflows/stale-actions.yaml
+++ b/.github/workflows/stale-actions.yaml
@@ -29,4 +29,4 @@ jobs:
days-before-close: 10
delete-branch: true
close-issue-message: This issue was automatically closed because of stale in 10 days
- close-pr-message: This PR was automatically closed because of stale in 10 days
\ No newline at end of file
+ close-pr-message: This PR was automatically closed because of stale in 10 days
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d83cc29f2a..ad121ba7ac 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
- rev: v1.55.0
+ rev: v1.56.0
hooks:
- id: terraform_fmt
- id: terraform_validate
@@ -17,7 +17,7 @@ repos:
- '--args=--only=terraform_documented_variables'
- '--args=--only=terraform_typed_variables'
- '--args=--only=terraform_module_pinned_source'
-# - '--args=--only=terraform_naming_convention'
+ - '--args=--only=terraform_naming_convention'
- '--args=--only=terraform_required_version'
- '--args=--only=terraform_required_providers'
- '--args=--only=terraform_standard_module_structure'
@@ -26,3 +26,4 @@ repos:
rev: v4.0.1
hooks:
- id: check-merge-conflict
+ - id: end-of-file-fixer
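
Re-enabling `terraform_naming_convention` means tflint will now flag identifiers that deviate from its default snake_case preset. A hedged sketch of the kind of name the rule rejects (assuming the rule's default configuration):

```hcl
# With terraform_naming_convention enabled (default preset), tflint flags the
# camelCase resource name and accepts the snake_case one.
resource "aws_security_group" "workerGroupOne" {}  # flagged: not snake_case
resource "aws_security_group" "worker_group_one" {} # passes
```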
diff --git a/README.md b/README.md
index b044b18dea..25cf9500ea 100644
--- a/README.md
+++ b/README.md
@@ -117,7 +117,6 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
| [tls](#requirement\_tls) | >= 2.2.0 |
## Providers
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index 4d8b24b8be..2642df93ed 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -24,19 +24,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-| [tls](#requirement\_tls) | >= 2.0 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+| [tls](#requirement\_tls) | >= 2.2.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
-| [tls](#provider\_tls) | >= 2.0 |
+| [aws](#provider\_aws) | >= 3.56.0 |
+| [tls](#provider\_tls) | >= 2.2.0 |
## Modules
@@ -51,13 +47,9 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_key_pair.nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
## Inputs
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index 2fcab6f85f..8dcd569ace 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "bottlerocket-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -33,11 +39,6 @@ module "eks" {
asg_desired_capacity = 2
key_name = aws_key_pair.nodes.key_name
- # Since we are using default VPC there is no NAT gateway so we need to
- # attach public ip to nodes so they can reach k8s API server
- # do not repeat this at home (i.e. production)
- public_ip = true
-
# This section overrides default userdata template to pass bottlerocket
# specific user data
userdata_template_file = "${path.module}/userdata.toml"
@@ -46,7 +47,7 @@ module "eks" {
userdata_template_extra_args = {
enable_admin_container = false
enable_control_container = true
- aws_region = data.aws_region.current.name
+ aws_region = local.region
}
# example of k8s/kubelet configuration via additional_userdata
additional_userdata = <<-EOT
@@ -56,11 +57,7 @@ module "eks" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
# SSM policy for bottlerocket control container access
@@ -70,30 +67,10 @@ resource "aws_iam_role_policy_attachment" "ssm" {
policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
################################################################################
# Supporting Resources
################################################################################
-data "aws_region" "current" {}
-
data "aws_ami" "bottlerocket_ami" {
most_recent = true
owners = ["amazon"]
@@ -117,13 +94,7 @@ resource "aws_key_pair" "nodes" {
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
@@ -131,7 +102,7 @@ module "vpc" {
name = local.name
cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
+  azs             = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
@@ -148,9 +119,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/examples/bottlerocket/outputs.tf b/examples/bottlerocket/outputs.tf
index 8ea0263436..440cd0f723 100644
--- a/examples/bottlerocket/outputs.tf
+++ b/examples/bottlerocket/outputs.tf
@@ -7,8 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-# output "node_groups" {
-# description = "Outputs from node groups"
-# value = module.eks.node_groups
-# }
diff --git a/examples/bottlerocket/userdata.toml b/examples/bottlerocket/userdata.toml
index 1dc9273f58..85019675a6 100644
--- a/examples/bottlerocket/userdata.toml
+++ b/examples/bottlerocket/userdata.toml
@@ -21,4 +21,4 @@ enabled = ${enable_admin_container}
# It is enabled by default, and can be disabled if you do not expect to use SSM.
# This could leave you with no way to access the API and change settings on an existing node!
[settings.host-containers.control]
-enabled = ${enable_control_container}
\ No newline at end of file
+enabled = ${enable_control_container}
diff --git a/examples/bottlerocket/versions.tf b/examples/bottlerocket/versions.tf
index 6adb95785f..83a000f86f 100644
--- a/examples/bottlerocket/versions.tf
+++ b/examples/bottlerocket/versions.tf
@@ -2,10 +2,13 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
- tls = ">= 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 2.2.0"
+ }
}
}
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 505251c50f..4da52b45fa 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -24,17 +24,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.56.0 |
## Modules
@@ -52,10 +48,7 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group.worker_group_mgmt_one](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group.worker_group_mgmt_two](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
@@ -67,5 +60,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index d5fc73e578..dca023bc75 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "complete-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -25,8 +31,6 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
-
-
worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
# Worker groups
@@ -104,11 +108,7 @@ module "eks" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
################################################################################
@@ -127,24 +127,6 @@ module "disabled_fargate" {
create_fargate_pod_execution_role = false
}
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
################################################################################
# Additional security groups for workers
################################################################################
@@ -154,13 +136,10 @@ resource "aws_security_group" "worker_group_mgmt_one" {
vpc_id = module.vpc.vpc_id
ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
-
- cidr_blocks = [
- "10.0.0.0/8",
- ]
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["10.0.0.0/8"]
}
}
@@ -169,13 +148,10 @@ resource "aws_security_group" "worker_group_mgmt_two" {
vpc_id = module.vpc.vpc_id
ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
-
- cidr_blocks = [
- "192.168.0.0/16",
- ]
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["192.168.0.0/16"]
}
}
@@ -187,7 +163,6 @@ resource "aws_security_group" "all_worker_mgmt" {
from_port = 22
to_port = 22
protocol = "tcp"
-
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
@@ -200,13 +175,7 @@ resource "aws_security_group" "all_worker_mgmt" {
# Supporting resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
@@ -231,9 +200,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf
index fcc55c0596..440cd0f723 100644
--- a/examples/complete/outputs.tf
+++ b/examples/complete/outputs.tf
@@ -7,8 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-output "kubectl_config" {
- description = "kubectl config as generated by the module."
- value = module.eks.kubeconfig
-}
diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf
index bbcf893252..97955e9bc8 100644
--- a/examples/complete/versions.tf
+++ b/examples/complete/versions.tf
@@ -2,9 +2,9 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
}
}
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index bac4c1f93f..862b72db33 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -25,17 +25,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [cloudinit](#provider\_cloudinit) | n/a |
+| [aws](#provider\_aws) | >= 3.56.0 |
+| [cloudinit](#provider\_cloudinit) | >= 2.0.0 |
## Modules
@@ -50,8 +48,6 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [cloudinit_config.custom](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 3fb04e43b7..ad734de6db 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -116,24 +116,6 @@ module "eks" {
tags = local.tags
}
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
################################################################################
# Supporting Resources
################################################################################
diff --git a/examples/eks_managed_node_group/templates/userdata.sh.tpl b/examples/eks_managed_node_group/templates/userdata.sh.tpl
deleted file mode 100644
index 41eeb0ba03..0000000000
--- a/examples/eks_managed_node_group/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,12 +0,0 @@
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-#!/bin/bash
-set -e
-
-# Bootstrap and join the cluster
-/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
-
---//--
diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf
index bbcf893252..e41b1ab10a 100644
--- a/examples/eks_managed_node_group/versions.tf
+++ b/examples/eks_managed_node_group/versions.tf
@@ -2,9 +2,13 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ cloudinit = {
+ source = "hashicorp/cloudinit"
+ version = ">= 2.0.0"
+ }
}
}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
index 9757e410f5..80cf92d880 100644
--- a/examples/fargate/README.md
+++ b/examples/fargate/README.md
@@ -23,17 +23,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.56.0 |
## Modules
@@ -47,10 +43,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
index 77320fcd67..70f747ad81 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "fargate-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -97,11 +103,7 @@ module "eks" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
##############################################
@@ -163,42 +165,14 @@ module "fargate_profile_existing_cluster" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
+ tags = local.tags
}
################################################################################
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
@@ -223,10 +197,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
-
diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf
index bbcf893252..97955e9bc8 100644
--- a/examples/fargate/versions.tf
+++ b/examples/fargate/versions.tf
@@ -2,9 +2,9 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
}
}
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
index db4185fb51..3eebdf8724 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/instance_refresh/README.md
@@ -22,19 +22,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [helm](#requirement\_helm) | ~> 2.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+| [helm](#requirement\_helm) | >= 2.0.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [helm](#provider\_helm) | ~> 2.0 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.56.0 |
+| [helm](#provider\_helm) | >= 2.0.0 |
## Modules
@@ -56,14 +52,10 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
## Inputs
@@ -75,6 +67,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [sqs\_queue\_asg\_notification\_arn](#output\_sqs\_queue\_asg\_notification\_arn) | SQS queue ASG notification ARN |
-| [sqs\_queue\_asg\_notification\_url](#output\_sqs\_queue\_asg\_notification\_url) | SQS queue ASG notification URL |
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
index 42e46cfc07..32dc7d1f6e 100644
--- a/examples/instance_refresh/main.tf
+++ b/examples/instance_refresh/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "instance_refresh-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -60,8 +66,6 @@ resource "aws_iam_policy" "aws_node_termination_handler" {
policy = data.aws_iam_policy_document.aws_node_termination_handler.json
}
-data "aws_region" "current" {}
-
data "aws_iam_policy_document" "aws_node_termination_handler_events" {
statement {
effect = "Allow"
@@ -76,7 +80,7 @@ data "aws_iam_policy_document" "aws_node_termination_handler_events" {
"sqs:SendMessage",
]
resources = [
- "arn:aws:sqs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:${local.name}",
+ "arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.name}",
]
}
}
@@ -158,7 +162,7 @@ resource "helm_release" "aws_node_termination_handler" {
set {
name = "awsRegion"
- value = data.aws_region.current.name
+ value = local.region
}
set {
name = "serviceAccount.name"
@@ -238,42 +242,14 @@ module "eks" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
+ tags = local.tags
}
################################################################################
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
@@ -298,9 +274,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf
index 1addebfa63..440cd0f723 100644
--- a/examples/instance_refresh/outputs.tf
+++ b/examples/instance_refresh/outputs.tf
@@ -7,13 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-output "sqs_queue_asg_notification_arn" {
- description = "SQS queue ASG notification ARN"
- value = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-output "sqs_queue_asg_notification_url" {
- description = "SQS queue ASG notification URL"
- value = module.aws_node_termination_handler_sqs.sqs_queue_id
-}
diff --git a/examples/instance_refresh/versions.tf b/examples/instance_refresh/versions.tf
index f546ca0cf0..5c4a43112e 100644
--- a/examples/instance_refresh/versions.tf
+++ b/examples/instance_refresh/versions.tf
@@ -2,10 +2,13 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
- helm = "~> 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 2.0.0"
+ }
}
}
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
index 4ca06a03a2..71c16fc842 100644
--- a/examples/irsa/README.md
+++ b/examples/irsa/README.md
@@ -22,19 +22,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [helm](#requirement\_helm) | ~> 2.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+| [helm](#requirement\_helm) | >= 2.0.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [helm](#provider\_helm) | ~> 2.0 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.56.0 |
+| [helm](#provider\_helm) | >= 2.0.0 |
## Modules
@@ -50,13 +46,9 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
## Inputs
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
index a36d0e3394..d7d8b2e981 100644
--- a/examples/irsa/irsa.tf
+++ b/examples/irsa/irsa.tf
@@ -1,7 +1,5 @@
data "aws_caller_identity" "current" {}
-data "aws_region" "current" {}
-
locals {
k8s_service_account_namespace = "kube-system"
k8s_service_account_name = "cluster-autoscaler-aws"
@@ -29,7 +27,7 @@ resource "helm_release" "cluster-autoscaler" {
set {
name = "awsRegion"
- value = data.aws_region.current.name
+ value = local.region
}
set {
name = "rbac.serviceAccount.name"
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
index 80a931f169..7d2c95afc5 100644
--- a/examples/irsa/main.tf
+++ b/examples/irsa/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "irsa-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -47,42 +53,14 @@ module "eks" {
}
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
+ tags = local.tags
}
################################################################################
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
@@ -107,10 +85,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
-
diff --git a/examples/irsa/versions.tf b/examples/irsa/versions.tf
index f546ca0cf0..5c4a43112e 100644
--- a/examples/irsa/versions.tf
+++ b/examples/irsa/versions.tf
@@ -2,10 +2,13 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
- helm = "~> 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 2.0.0"
+ }
}
}
diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md
index 567250d548..2b5d070709 100644
--- a/examples/secrets_encryption/README.md
+++ b/examples/secrets_encryption/README.md
@@ -22,17 +22,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.22.0 |
-| [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.22.0 |
-| [random](#provider\_random) | >= 2.1 |
+| [aws](#provider\_aws) | >= 3.56.0 |
## Modules
@@ -46,10 +42,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
-| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
index 45124ca350..6ff01c3880 100644
--- a/examples/secrets_encryption/main.tf
+++ b/examples/secrets_encryption/main.tf
@@ -3,9 +3,15 @@ provider "aws" {
}
locals {
- name = "secrets_encryption-${random_string.suffix.result}"
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.20"
region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
}
################################################################################
@@ -24,7 +30,6 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
-
cluster_encryption_config = [
{
provider_key_arn = aws_kms_key.eks.arn
@@ -41,29 +46,7 @@ module "eks" {
},
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
+ tags = local.tags
}
################################################################################
@@ -82,18 +65,11 @@ resource "aws_kms_key" "eks" {
}
}
-
################################################################################
# Supporting Resources
################################################################################
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
- length = 8
- special = false
-}
+data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
@@ -118,9 +94,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = "1"
}
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
+ tags = local.tags
}
diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf
index bbcf893252..97955e9bc8 100644
--- a/examples/secrets_encryption/versions.tf
+++ b/examples/secrets_encryption/versions.tf
@@ -2,9 +2,9 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.22.0"
- local = ">= 1.4"
- random = ">= 2.1"
- kubernetes = "~> 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
}
}
diff --git a/examples/self_managed_node_groups/README.md b/examples/self_managed_node_groups/README.md
index 5d76a53321..21017e5e34 100644
--- a/examples/self_managed_node_groups/README.md
+++ b/examples/self_managed_node_groups/README.md
@@ -45,8 +45,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
diff --git a/examples/self_managed_node_groups/main.tf b/examples/self_managed_node_groups/main.tf
index 1708b803d2..43b9ea67bf 100644
--- a/examples/self_managed_node_groups/main.tf
+++ b/examples/self_managed_node_groups/main.tf
@@ -69,24 +69,6 @@ module "eks" {
tags = local.tags
}
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
-data "aws_eks_cluster" "cluster" {
- name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
################################################################################
# Supporting Resources
################################################################################
diff --git a/examples/self_managed_node_groups/outputs.tf b/examples/self_managed_node_groups/outputs.tf
index 8ea0263436..440cd0f723 100644
--- a/examples/self_managed_node_groups/outputs.tf
+++ b/examples/self_managed_node_groups/outputs.tf
@@ -7,8 +7,3 @@ output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane."
value = module.eks.cluster_security_group_id
}
-
-# output "node_groups" {
-# description = "Outputs from node groups"
-# value = module.eks.node_groups
-# }
diff --git a/main.tf b/main.tf
index 352e20b318..710dbd2677 100644
--- a/main.tf
+++ b/main.tf
@@ -1,12 +1,4 @@
locals {
-
- # EKS Cluster
- cluster_id = try(aws_eks_cluster.this[0].id, "")
- cluster_arn = try(aws_eks_cluster.this[0].arn, "")
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
- cluster_primary_security_group_id = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
-
cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
# Worker groups
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 64f1266d40..0a08421bd0 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -95,14 +95,14 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
## Providers
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
+| [cloudinit](#provider\_cloudinit) | >= 2.0.0 |
## Modules
diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf
index 5324b482ab..e41b1ab10a 100644
--- a/modules/eks-managed-node-group/versions.tf
+++ b/modules/eks-managed-node-group/versions.tf
@@ -2,7 +2,13 @@ terraform {
required_version = ">= 0.13.1"
required_providers {
- aws = ">= 3.56.0"
- cloudinit = ">= 2.0"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56.0"
+ }
+ cloudinit = {
+ source = "hashicorp/cloudinit"
+ version = ">= 2.0.0"
+ }
}
}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 8bbf2a2f36..a7cb62d157 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -6,13 +6,13 @@
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.53 |
+| [aws](#requirement\_aws) | >= 3.56 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.53 |
+| [aws](#provider\_aws) | >= 3.56 |
## Modules
diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf
index 009bedfded..9480a77da8 100644
--- a/modules/self-managed-node-group/versions.tf
+++ b/modules/self-managed-node-group/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.53"
+ version = ">= 3.56"
}
}
}
diff --git a/outputs.tf b/outputs.tf
index 512949e810..a8622cb9e0 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,26 +1,26 @@
output "cluster_id" {
description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready."
- value = local.cluster_id
+ value = try(aws_eks_cluster.this[0].id, "")
}
output "cluster_arn" {
description = "The Amazon Resource Name (ARN) of the cluster."
- value = local.cluster_arn
+ value = try(aws_eks_cluster.this[0].arn, "")
}
output "cluster_certificate_authority_data" {
description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster."
- value = local.cluster_auth_base64
+ value = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
}
output "cluster_endpoint" {
description = "The endpoint for your EKS Kubernetes API."
- value = local.cluster_endpoint
+ value = try(aws_eks_cluster.this[0].endpoint, "")
}
output "cluster_version" {
description = "The Kubernetes server version for the EKS cluster."
- value = element(concat(aws_eks_cluster.this[*].version, [""]), 0)
+ value = try(aws_eks_cluster.this[0].version, "")
}
output "cluster_security_group_id" {
@@ -45,22 +45,22 @@ output "cluster_oidc_issuer_url" {
output "cluster_primary_security_group_id" {
description = "The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console."
- value = local.cluster_primary_security_group_id
+ value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
}
output "cloudwatch_log_group_name" {
description = "Name of cloudwatch log group created"
- value = element(concat(aws_cloudwatch_log_group.this[*].name, [""]), 0)
+ value = try(aws_cloudwatch_log_group.this[0].name, "")
}
output "cloudwatch_log_group_arn" {
description = "Arn of cloudwatch log group created"
- value = element(concat(aws_cloudwatch_log_group.this[*].arn, [""]), 0)
+ value = try(aws_cloudwatch_log_group.this[0].arn, "")
}
output "oidc_provider_arn" {
description = "The ARN of the OIDC Provider if `enable_irsa = true`."
- value = var.enable_irsa ? concat(aws_iam_openid_connect_provider.oidc_provider[*].arn, [""])[0] : null
+ value = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, "")
}
output "fargate_profile_ids" {
diff --git a/versions.tf b/versions.tf
index 4e574b1aa9..83a000f86f 100644
--- a/versions.tf
+++ b/versions.tf
@@ -6,10 +6,6 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.56.0"
}
- cloudinit = {
- source = "hashicorp/cloudinit"
- version = ">= 2.0.0"
- }
tls = {
source = "hashicorp/tls"
version = ">= 2.2.0"
From 6581f0d336f99d89017b85f472b27a3e5ffc561b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 11:25:19 -0500
Subject: [PATCH 21/83] chore: working through validating examples to at least
pass checks first
---
README.md | 7 +-
examples/complete/README.md | 4 +-
examples/complete/main.tf | 106 +++++++-----------
examples/eks_managed_node_group/README.md | 2 -
.../eks_managed_node_group/launchtemplate.tf | 2 +-
examples/eks_managed_node_group/main.tf | 20 ++--
examples/fargate/main.tf | 2 +-
examples/instance_refresh/README.md | 4 +-
examples/instance_refresh/main.tf | 6 +-
examples/irsa/irsa.tf | 8 +-
examples/irsa/main.tf | 2 +-
examples/secrets_encryption/main.tf | 2 +-
main.tf | 14 +--
modules/eks-managed-node-group/README.md | 6 +-
modules/eks-managed-node-group/main.tf | 8 +-
modules/eks-managed-node-group/variables.tf | 8 +-
modules/self-managed-node-group/main.tf | 2 +-
outputs.tf | 16 ++-
variables.tf | 24 ++++
workers.tf | 12 +-
20 files changed, 136 insertions(+), 119 deletions(-)
diff --git a/README.md b/README.md
index 25cf9500ea..063d449925 100644
--- a/README.md
+++ b/README.md
@@ -143,7 +143,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
@@ -156,9 +156,13 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
+| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or `cluster_endpoint_private_access_sg` should be provided | `bool` | `false` | no |
+| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` |
[ "0.0.0.0/0" ]
| no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | `list(object({ provider_key_arn = string, resources = list(string) }))` | `[]` | no |
| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
+| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
+| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
@@ -218,5 +222,4 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
-| [security\_group\_rule\_cluster\_https\_worker\_ingress](#output\_security\_group\_rule\_cluster\_https\_worker\_ingress) | Security group rule responsible for allowing pods to communicate with the EKS cluster API. |
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 4da52b45fa..2c12b42244 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -45,9 +45,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
-| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.worker_group_mgmt_one](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.worker_group_mgmt_two](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
## Inputs
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index dca023bc75..c10a4e374e 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -31,10 +31,10 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
+ # TODO
+ # vpc_security_group_ids = [aws_security_group.additional.id]
- # Worker groups
- worker_groups = {
+ self_managed_node_groups = {
one = {
name = "spot-1"
override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
@@ -43,21 +43,25 @@ module "eks" {
asg_desired_capacity = 5
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
public_ip = true
+
+ vpc_security_group_ids = [aws_security_group.additional.id] # TODO
}
}
- # Managed Node Groups
- node_groups_defaults = {
- ami_type = "AL2_x86_64"
- disk_size = 50
- }
+ # # Managed Node Groups
+ # node_groups_defaults = {
+ # ami_type = "AL2_x86_64"
+ # disk_size = 50
+ # }
- node_groups = {
+ eks_managed_node_groups = {
example = {
desired_capacity = 1
max_capacity = 10
min_capacity = 1
+ vpc_security_group_ids = [aws_security_group.additional.id] # TODO
+
instance_types = ["t3.large"]
capacity_type = "SPOT"
k8s_labels = {
@@ -68,45 +72,45 @@ module "eks" {
additional_tags = {
ExtraTag = "example"
}
- taints = [
- {
+ taints = {
+ dedicated = {
key = "dedicated"
value = "gpuGroup"
effect = "NO_SCHEDULE"
}
- ]
+ }
update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
}
}
- # Fargate
- fargate_profiles = {
- default = {
- name = "default"
- selectors = [
- {
- namespace = "kube-system"
- labels = {
- k8s-app = "kube-dns"
- }
- },
- {
- namespace = "default"
- }
- ]
-
- tags = {
- Owner = "test"
- }
-
- timeouts = {
- create = "20m"
- delete = "20m"
- }
- }
- }
+ # # Fargate
+ # fargate_profiles = {
+ # default = {
+ # name = "default"
+ # selectors = [
+ # {
+ # namespace = "kube-system"
+ # labels = {
+ # k8s-app = "kube-dns"
+ # }
+ # },
+ # {
+ # namespace = "default"
+ # }
+ # ]
+
+ # tags = {
+ # Owner = "test"
+ # }
+
+ # timeouts = {
+ # create = "20m"
+ # delete = "20m"
+ # }
+ # }
+ # }
tags = local.tags
}
@@ -131,31 +135,7 @@ module "disabled_fargate" {
# Additional security groups for workers
################################################################################
-resource "aws_security_group" "worker_group_mgmt_one" {
- name_prefix = "worker_group_mgmt_one"
- vpc_id = module.vpc.vpc_id
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["10.0.0.0/8"]
- }
-}
-
-resource "aws_security_group" "worker_group_mgmt_two" {
- name_prefix = "worker_group_mgmt_two"
- vpc_id = module.vpc.vpc_id
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["192.168.0.0/16"]
- }
-}
-
-resource "aws_security_group" "all_worker_mgmt" {
+resource "aws_security_group" "additional" {
name_prefix = "all_worker_management"
vpc_id = module.vpc.vpc_id
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index 862b72db33..3a55a24c04 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -33,7 +33,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0.0 |
## Modules
@@ -48,7 +47,6 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [cloudinit_config.custom](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
diff --git a/examples/eks_managed_node_group/launchtemplate.tf b/examples/eks_managed_node_group/launchtemplate.tf
index 0f0e4ebf31..cd28e89061 100644
--- a/examples/eks_managed_node_group/launchtemplate.tf
+++ b/examples/eks_managed_node_group/launchtemplate.tf
@@ -45,7 +45,7 @@ resource "aws_launch_template" "default" {
network_interfaces {
associate_public_ip_address = false
delete_on_termination = true
- security_groups = [module.eks.worker_security_group_id]
+ # security_groups = [module.eks.worker_security_group_id] # TODO
}
# if you want to use a custom AMI
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index ad734de6db..4281d3bcdb 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -18,16 +18,16 @@ locals {
# EKS Module
################################################################################
-data "cloudinit_config" "custom" {
- gzip = false
- base64_encode = true
- boundary = "//"
-
- part {
- content_type = "text/x-shellscript"
- content = "echo 'hello world!'"
- }
-}
+# data "cloudinit_config" "custom" {
+# gzip = false
+# base64_encode = true
+# boundary = "//"
+
+# part {
+# content_type = "text/x-shellscript"
+# content = "echo 'hello world!'"
+# }
+# }
module "eks" {
source = "../.."
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
index 70f747ad81..0ab24bebc6 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate/main.tf
@@ -34,7 +34,7 @@ module "eks" {
  # A node group is required to schedule CoreDNS, which is critical for internal DNS resolution.
  # If you want to run Fargate only, you must follow the `(Optional) Update CoreDNS` docs
# available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
- node_groups = {
+ eks_managed_node_groups = {
example = {
desired_capacity = 1
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
index 3eebdf8724..4d47ee900c 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/instance_refresh/README.md
@@ -36,8 +36,8 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Source | Version |
|------|--------|---------|
-| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | 4.1.0 |
-| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0.0 |
+| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
+| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
| [eks](#module\_eks) | ../.. | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
index 32dc7d1f6e..9abf6190c0 100644
--- a/examples/instance_refresh/main.tf
+++ b/examples/instance_refresh/main.tf
@@ -87,7 +87,7 @@ data "aws_iam_policy_document" "aws_node_termination_handler_events" {
module "aws_node_termination_handler_sqs" {
source = "terraform-aws-modules/sqs/aws"
- version = "~> 3.0.0"
+ version = "~> 3.0"
name = local.name
message_retention_seconds = 300
policy = data.aws_iam_policy_document.aws_node_termination_handler_events.json
@@ -139,7 +139,7 @@ resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
module "aws_node_termination_handler_role" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "4.1.0"
+ version = "~> 4.0"
create_role = true
role_description = "IRSA role for ANTH, cluster ${local.name}"
role_name_prefix = local.name
@@ -216,7 +216,7 @@ module "eks" {
cluster_endpoint_public_access = true
enable_irsa = true
- worker_groups = {
+ self_managed_node_groups = {
one = {
name = "refresh"
asg_max_size = 2
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
index d7d8b2e981..03ec6f28dc 100644
--- a/examples/irsa/irsa.tf
+++ b/examples/irsa/irsa.tf
@@ -1,5 +1,9 @@
data "aws_caller_identity" "current" {}
+data "aws_eks_cluster_auth" "cluster" {
+ name = module.eks.cluster_id
+}
+
locals {
k8s_service_account_namespace = "kube-system"
k8s_service_account_name = "cluster-autoscaler-aws"
@@ -7,8 +11,8 @@ locals {
provider "helm" {
kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.cluster.token
}
}
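
Pieced together from the two hunks above, the helm provider wiring after this change reads as follows (nothing new here; every name comes straight from the diff):

    data "aws_eks_cluster_auth" "cluster" {
      name = module.eks.cluster_id
    }

    provider "helm" {
      kubernetes {
        host                   = module.eks.cluster_endpoint
        cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
        token                  = data.aws_eks_cluster_auth.cluster.token
      }
    }

Reading the endpoint and CA data from module outputs drops the former `data.aws_eks_cluster.cluster` lookup; only the short-lived auth token still needs a data source.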
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
index 7d2c95afc5..78bfdf55b5 100644
--- a/examples/irsa/main.tf
+++ b/examples/irsa/main.tf
@@ -32,7 +32,7 @@ module "eks" {
enable_irsa = true
- worker_groups = {
+ self_managed_node_groups = {
one = {
name = "worker-group-1"
instance_type = "t3.medium"
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
index 6ff01c3880..560c46293a 100644
--- a/examples/secrets_encryption/main.tf
+++ b/examples/secrets_encryption/main.tf
@@ -37,7 +37,7 @@ module "eks" {
}
]
- worker_groups = {
+ self_managed_node_groups = {
one = {
name = "worker-group-1"
instance_type = "t3.small"
diff --git a/main.tf b/main.tf
index 710dbd2677..84c82b7550 100644
--- a/main.tf
+++ b/main.tf
@@ -1,5 +1,5 @@
locals {
- cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
+ cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.this.*.id) : var.cluster_security_group_id
# Worker groups
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
@@ -55,7 +55,7 @@ resource "aws_eks_cluster" "this" {
depends_on = [
aws_security_group_rule.cluster_egress_internet,
- aws_security_group_rule.cluster_https_worker_ingress,
+ # aws_security_group_rule.cluster_https_worker_ingress,
aws_cloudwatch_log_group.this
]
}
@@ -80,7 +80,7 @@ locals {
enable_cluster_private_endpoint_sg_access = local.create_cluster_sg && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access
}
-resource "aws_security_group" "cluster" {
+resource "aws_security_group" "this" {
count = local.create_cluster_sg ? 1 : 0
name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
@@ -122,27 +122,27 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
# }
resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
- for_each = local.enable_cluster_private_endpoint_sg_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
+ count = local.enable_cluster_private_endpoint_sg_access && length(var.cluster_endpoint_private_access_cidrs) > 0 ? 1 : 0
description = "Allow private K8S API ingress from custom CIDR source"
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
- cidr_blocks = [each.value]
+ cidr_blocks = var.cluster_endpoint_private_access_cidrs
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
}
resource "aws_security_group_rule" "cluster_private_access_sg_source" {
- count = local.enable_cluster_private_endpoint_sg_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
+ for_each = local.enable_cluster_private_endpoint_sg_access ? toset(var.cluster_endpoint_private_access_sg) : toset([])
description = "Allow private K8S API ingress from custom Security Groups source"
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
- source_security_group_id = var.cluster_endpoint_private_access_sg[count.index]
+ source_security_group_id = each.value
security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
}
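
The swap of `for_each` and `count` between these two rules follows from the `aws_security_group_rule` schema: `cidr_blocks` accepts a list, so every CIDR source fits into a single rule, while `source_security_group_id` accepts a single ID, so each security group source needs a rule of its own. A minimal standalone sketch of the same pattern (all values are placeholders):

    variable "target_sg_id" {
      type    = string
      default = "sg-0fedcba9876543210" # placeholder for the cluster security group
    }

    variable "cidr_sources" {
      type    = list(string)
      default = ["10.0.0.0/8", "172.16.0.0/12"]
    }

    variable "sg_sources" {
      type    = list(string)
      default = ["sg-0123456789abcdef0"] # placeholder
    }

    # One rule carries every CIDR source
    resource "aws_security_group_rule" "from_cidrs" {
      count = length(var.cidr_sources) > 0 ? 1 : 0

      type              = "ingress"
      from_port         = 443
      to_port           = 443
      protocol          = "tcp"
      cidr_blocks       = var.cidr_sources
      security_group_id = var.target_sg_id
    }

    # Each security group source needs its own rule
    resource "aws_security_group_rule" "from_sgs" {
      for_each = toset(var.sg_sources)

      type                     = "ingress"
      from_port                = 443
      to_port                  = 443
      protocol                 = "tcp"
      source_security_group_id = each.value
      security_group_id        = var.target_sg_id
    }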
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 0a08421bd0..04ec400095 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -193,7 +193,7 @@ No modules.
| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
-| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `null` | no |
+| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `{}` | no |
| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
| [security\_group\_egress\_cidr\_blocks](#input\_security\_group\_egress\_cidr\_blocks) | List of CIDR blocks that are permitted for security group egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
@@ -202,9 +202,9 @@ No modules.
| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
| [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(any)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
-| [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `map(string)` | `null` | no |
+| [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `map(any)` | `{}` | no |
| [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no |
-| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `null` | no |
+| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `{}` | no |
| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `bool` | `true` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index be29377b78..88f8fcbe0e 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -74,7 +74,7 @@ resource "aws_launch_template" "this" {
key_name = var.key_name
user_data = try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, var.custom_user_data)
- vpc_security_group_ids = var.vpc_security_group_ids
+ vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
default_version = var.default_version
update_default_version = var.update_default_version
@@ -306,7 +306,7 @@ resource "aws_eks_node_group" "this" {
}
dynamic "remote_access" {
- for_each = var.remote_access != null ? [var.remote_access] : []
+    for_each = length(var.remote_access) > 0 ? [var.remote_access] : []
content {
ec2_ssh_key = lookup(remote_access.value, "ec2_ssh_key", null)
source_security_group_ids = lookup(remote_access.value, "source_security_group_ids", [])
@@ -314,7 +314,7 @@ resource "aws_eks_node_group" "this" {
}
dynamic "taint" {
- for_each = var.taints != null ? var.taints : {}
+ for_each = var.taints
content {
key = taint.value.key
value = lookup(taint.value, "value")
@@ -323,7 +323,7 @@ resource "aws_eks_node_group" "this" {
}
dynamic "update_config" {
- for_each = var.update_config != null ? [var.update_config] : []
+    for_each = length(var.update_config) > 0 ? [var.update_config] : []
content {
max_unavailable_percentage = lookup(update_config.value, "max_unavailable_percentage", null)
max_unavailable = lookup(update_config.value, "max_unavailable", null)
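
For reference, the value shapes these `dynamic` blocks consume, lifted from the `examples/complete` change earlier in this patch (the `remote_access` entry is a hedged sketch; only `ec2_ssh_key` fits the current `map(string)` type):

    taints = {
      dedicated = {
        key    = "dedicated"
        value  = "gpuGroup"
        effect = "NO_SCHEDULE"
      }
    }

    update_config = {
      max_unavailable_percentage = 50 # or set `max_unavailable`
    }

    remote_access = {
      ec2_ssh_key = "my-keypair" # placeholder; `source_security_group_ids` is also read by the block but would need a looser type than map(string)
    }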
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index a839bd0072..80fd169e72 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -361,19 +361,19 @@ variable "launch_template_version" {
variable "remote_access" {
description = "Configuration block with remote access settings"
type = map(string)
- default = null
+ default = {}
}
variable "taints" {
description = "The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group"
- type = map(string)
- default = null
+ type = map(any)
+ default = {}
}
variable "update_config" {
description = "Configuration block of settings for max unavailable resources during node group updates"
type = map(string)
- default = null
+ default = {}
}
variable "timeouts" {
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index b523645c39..3d1c4a22db 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -29,7 +29,7 @@ resource "aws_launch_template" "this" {
key_name = var.key_name
user_data = var.user_data
- vpc_security_group_ids = var.vpc_security_group_ids
+ vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
default_version = var.default_version
update_default_version = var.update_default_version
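
The `compact(concat([try(...)], ...))` idiom above always attaches the module-created security group when it exists and degrades gracefully when it does not. Traced by hand (a sketch, assuming one user-supplied group `sg-user`):

    # module SG created (count = 1):
    #   try(aws_security_group.this[0].id, "")  => "sg-module"
    #   concat(["sg-module"], ["sg-user"])      => ["sg-module", "sg-user"]
    #   compact(...)                            => ["sg-module", "sg-user"]
    #
    # module SG not created (count = 0, so [0] fails and try() yields ""):
    #   try(aws_security_group.this[0].id, "")  => ""
    #   concat([""], ["sg-user"])               => ["", "sg-user"]
    #   compact(...)                            => ["sg-user"]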
diff --git a/outputs.tf b/outputs.tf
index a8622cb9e0..cbc78528d8 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -83,7 +83,17 @@ output "fargate_iam_role_arn" {
value = module.fargate.iam_role_arn
}
-output "security_group_rule_cluster_https_worker_ingress" {
- description = "Security group rule responsible for allowing pods to communicate with the EKS cluster API."
- value = aws_security_group_rule.cluster_https_worker_ingress
+# output "security_group_rule_cluster_https_worker_ingress" {
+# description = "Security group rule responsible for allowing pods to communicate with the EKS cluster API."
+# value = aws_security_group_rule.cluster_https_worker_ingress
+# }
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.self_managed_node_groups
}
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks_managed_node_groups
+}
\ No newline at end of file
diff --git a/variables.tf b/variables.tf
index cf2c0b21d6..50145f1a6e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -136,6 +136,30 @@ variable "cluster_security_group_use_name_prefix" {
default = true
}
+variable "cluster_egress_cidrs" {
+ description = "List of CIDR blocks that are permitted for cluster egress traffic"
+ type = list(string)
+ default = ["0.0.0.0/0"]
+}
+
+variable "cluster_create_endpoint_private_access_sg_rule" {
+ description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster_endpoint_private_access_sg' should be provided"
+ type = bool
+ default = false
+}
+
+variable "cluster_endpoint_private_access_cidrs" {
+ description = "List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
+ type = list(string)
+ default = []
+}
+
+variable "cluster_endpoint_private_access_sg" {
+ description = "List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
+ type = list(string)
+ default = []
+}
+
variable "cluster_security_group_tags" {
description = "A map of additional tags to add to the cluster security group created"
type = map(string)
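
Taken together, the new inputs gate one optional feature: ingress rules on the EKS-managed cluster security group that admit private API traffic. A hedged usage sketch (module source path and values are placeholders); note that per `enable_cluster_private_endpoint_sg_access` in main.tf, both boolean flags and the module-created cluster security group must all be enabled before either list has any effect:

    module "eks" {
      source = "../.."

      # ... cluster name/version and VPC/subnet wiring elided ...

      cluster_endpoint_private_access                = true
      cluster_create_endpoint_private_access_sg_rule = true
      cluster_endpoint_private_access_cidrs          = ["10.0.0.0/8"]
      cluster_endpoint_private_access_sg             = ["sg-0123456789abcdef0"] # placeholder
    }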
diff --git a/workers.tf b/workers.tf
index 4ba202d861..72fc9af944 100644
--- a/workers.tf
+++ b/workers.tf
@@ -9,7 +9,7 @@ module "fargate" {
create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
fargate_pod_execution_role_arn = var.fargate_pod_execution_role_arn
- cluster_name = aws_eks_cluster.this[0].name
+ cluster_name = try(aws_eks_cluster.this[0].name, var.cluster_name) # TODO - why?!
subnet_ids = coalescelist(var.fargate_subnet_ids, var.subnet_ids, [""])
iam_path = var.fargate_iam_role_path
@@ -52,9 +52,9 @@ module "eks_managed_node_groups" {
labels = try(each.value.labels, null)
cluster_version = try(each.value.cluster_version, var.cluster_version)
- remote_access = try(each.value.remote_access, null)
- taints = try(each.value.taints, null)
- update_config = try(each.value.update_config, null)
+ remote_access = try(each.value.remote_access, {})
+ taints = try(each.value.taints, {})
+ update_config = try(each.value.update_config, {})
timeouts = try(each.value.timeouts, {})
# User data
@@ -79,7 +79,7 @@ module "eks_managed_node_groups" {
ebs_optimized = try(each.value.ebs_optimized, null)
key_name = try(each.value.key_name, null)
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, null)
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, [])
default_version = try(each.value.default_version, null)
update_default_version = try(each.value.update_default_version, null)
@@ -181,7 +181,7 @@ module "self_managed_node_group" {
key_name = try(each.value.key_name, null)
user_data = try(each.value.user_data, null)
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, null)
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, [])
default_version = try(each.value.default_version, null)
update_default_version = try(each.value.update_default_version, null)
From 0cc5a4acc93b151bd3640ac317fa12aadee8ff74 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 15:18:30 -0500
Subject: [PATCH 22/83] chore: update outputs
---
README.md | 30 +++---
examples/irsa/README.md | 1 +
main.tf | 5 +-
modules/eks-managed-node-group/README.md | 16 ++-
modules/eks-managed-node-group/main.tf | 12 +--
modules/eks-managed-node-group/outputs.tf | 89 ++++++++++++++++-
modules/self-managed-node-group/README.md | 34 ++++---
modules/self-managed-node-group/main.tf | 4 +-
modules/self-managed-node-group/outputs.tf | 105 +++++++++++++------
outputs.tf | 111 ++++++++++++++-------
10 files changed, 296 insertions(+), 111 deletions(-)
diff --git a/README.md b/README.md
index 063d449925..26d4c3621b 100644
--- a/README.md
+++ b/README.md
@@ -207,19 +207,23 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
|------|-------------|
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster. |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | The endpoint for your EKS Kubernetes API. |
-| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster. |
-| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster. |
-| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready. |
-| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer |
-| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ID attached to the EKS cluster. On 1.14 or later, this is the 'Additional security groups' in the EKS console. |
-| [cluster\_version](#output\_cluster\_version) | The Kubernetes server version for the EKS cluster. |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
| [fargate\_iam\_role\_arn](#output\_fargate\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
| [fargate\_iam\_role\_name](#output\_fargate\_iam\_role\_name) | IAM role name for EKS Fargate pods |
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
+| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles |
+| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:) |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
index 71c16fc842..0299ef4ca6 100644
--- a/examples/irsa/README.md
+++ b/examples/irsa/README.md
@@ -48,6 +48,7 @@ Note that this example may create resources which cost money. Run `terraform des
| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
diff --git a/main.tf b/main.tf
index 84c82b7550..09b5264792 100644
--- a/main.tf
+++ b/main.tf
@@ -71,7 +71,8 @@ resource "aws_cloudwatch_log_group" "this" {
}
################################################################################
-# Cluster Security Group
+# Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
################################################################################
locals {
@@ -186,7 +187,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
}
################################################################################
-# Cluster IAM Role
+# IAM Role
################################################################################
locals {
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 04ec400095..b08613947e 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -212,5 +212,19 @@ No modules.
## Outputs
-No outputs.
+| Name | Description |
+|------|-------------|
+| [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
+| [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
+| [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
+| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
+| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
+| [node\_group\_arn](#output\_node\_group\_arn) | Amazon Resource Name (ARN) of the EKS Node Group |
+| [node\_group\_id](#output\_node\_group\_id) | EKS Cluster name and EKS Node Group name separated by a colon (`:`) |
+| [node\_group\_resources](#output\_node\_group\_resources) | List of objects containing information about underlying resources |
+| [node\_group\_status](#output\_node\_group\_status) | Status of the EKS Node Group |
+| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group |
+| [security\_group\_id](#output\_security\_group\_id) | ID of the security group |
+| [user\_data](#output\_user\_data) | Rendered user data output that is supplied to the launch template when enabled/used |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 88f8fcbe0e..39f0357e65 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -1,8 +1,3 @@
-locals {
- use_custom_launch_template = var.create_launch_template || var.launch_template_name != null
-
-}
-
data "aws_partition" "current" {}
################################################################################
@@ -60,6 +55,10 @@ data "cloudinit_config" "eks_optimized_ami_user_data" {
# Launch template
################################################################################
+locals {
+ use_custom_launch_template = var.create_launch_template || var.launch_template_name != null
+}
+
resource "aws_launch_template" "this" {
count = var.create && var.create_launch_template ? 1 : 0
@@ -261,7 +260,7 @@ resource "aws_launch_template" "this" {
}
################################################################################
-# EKS Managed Node Group
+# Node Group
################################################################################
locals {
@@ -354,7 +353,6 @@ resource "aws_eks_node_group" "this" {
)
}
-
################################################################################
# Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf
index ab3b051646..9ab256e170 100644
--- a/modules/eks-managed-node-group/outputs.tf
+++ b/modules/eks-managed-node-group/outputs.tf
@@ -1,4 +1,85 @@
-# output "node_groups" {
-# description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
-# value = aws_eks_node_group.this
-# }
+################################################################################
+# User Data
+################################################################################
+
+output "user_data" {
+ description = "Rendered user data output that is supplied to the launch template when enabled/used"
+ value = try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, "")
+ sensitive = true
+}
+
+################################################################################
+# Launch template
+################################################################################
+
+output "launch_template_id" {
+ description = "The ID of the launch template"
+ value = try(aws_launch_template.this[0].id, "")
+}
+
+output "launch_template_arn" {
+ description = "The ARN of the launch template"
+ value = try(aws_launch_template.this[0].arn, "")
+}
+
+output "launch_template_latest_version" {
+ description = "The latest version of the launch template"
+ value = try(aws_launch_template.this[0].latest_version, "")
+}
+
+################################################################################
+# Node Group
+################################################################################
+
+output "node_group_arn" {
+ description = "Amazon Resource Name (ARN) of the EKS Node Group"
+ value = try(aws_eks_node_group.this[0].arn, "")
+}
+
+output "node_group_id" {
+ description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)"
+ value = try(aws_eks_node_group.this[0].id, "")
+}
+
+output "node_group_resources" {
+ description = "List of objects containing information about underlying resources"
+ value = try(aws_eks_node_group.this[0].resources, "")
+}
+
+output "node_group_status" {
+ description = "Status of the EKS Node Group"
+  value       = try(aws_eks_node_group.this[0].status, "")
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the security group"
+ value = try(aws_security_group.this[0].arn, "")
+}
+
+output "security_group_id" {
+ description = "ID of the security group"
+ value = try(aws_security_group.this[0].id, "")
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "iam_role_name" {
+ description = "The name of the IAM role"
+ value = try(aws_iam_role.this[0].name, "")
+}
+
+output "iam_role_arn" {
+ description = "The Amazon Resource Name (ARN) specifying the IAM role"
+ value = try(aws_iam_role.this[0].arn, "")
+}
+
+output "iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = try(aws_iam_role.this[0].unique_id, "")
+}
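
Every output above leans on the same `try(resource[0].attr, "")` pattern: because the resources are gated with `count`, indexing `[0]` fails when the resource was not created, and `try` converts that failure into an empty-string default instead of a plan-time error. A minimal illustration of the pattern only (hypothetical names):

    variable "create" {
      type    = bool
      default = true
    }

    resource "aws_launch_template" "this" {
      count = var.create ? 1 : 0

      # ... template settings elided ...
    }

    output "launch_template_id" {
      # "" when var.create = false, the real ID otherwise
      value = try(aws_launch_template.this[0].id, "")
    }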
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index a7cb62d157..c637b1b898 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -136,21 +136,27 @@ No modules.
| Name | Description |
|------|-------------|
-| [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this AutoScaling Group |
-| [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscale group |
-| [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
-| [autoscaling\_group\_desired\_capacity](#output\_autoscaling\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
-| [autoscaling\_group\_health\_check\_grace\_period](#output\_autoscaling\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
-| [autoscaling\_group\_health\_check\_type](#output\_autoscaling\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
-| [autoscaling\_group\_id](#output\_autoscaling\_group\_id) | The autoscaling group id |
-| [autoscaling\_group\_load\_balancers](#output\_autoscaling\_group\_load\_balancers) | The load balancer names associated with the autoscaling group |
-| [autoscaling\_group\_max\_size](#output\_autoscaling\_group\_max\_size) | The maximum size of the autoscale group |
-| [autoscaling\_group\_min\_size](#output\_autoscaling\_group\_min\_size) | The minimum size of the autoscale group |
-| [autoscaling\_group\_name](#output\_autoscaling\_group\_name) | The autoscaling group name |
-| [autoscaling\_group\_target\_group\_arns](#output\_autoscaling\_group\_target\_group\_arns) | List of Target Group ARNs that apply to this AutoScaling Group |
-| [autoscaling\_group\_vpc\_zone\_identifier](#output\_autoscaling\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
-| [autoscaling\_schedule\_arns](#output\_autoscaling\_schedule\_arns) | ARNs of autoscaling group schedules |
+| [iam\_instance\_profile\_arn](#output\_iam\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile |
+| [iam\_instance\_profile\_id](#output\_iam\_instance\_profile\_id) | Instance profile's ID |
+| [iam\_instance\_profile\_unique](#output\_iam\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile |
+| [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
+| [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
+| [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
+| [node\_group\_arn](#output\_node\_group\_arn) | The ARN for this node group |
+| [node\_group\_availability\_zones](#output\_node\_group\_availability\_zones) | The availability zones of the node group |
+| [node\_group\_default\_cooldown](#output\_node\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
+| [node\_group\_desired\_capacity](#output\_node\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
+| [node\_group\_health\_check\_grace\_period](#output\_node\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
+| [node\_group\_health\_check\_type](#output\_node\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
+| [node\_group\_id](#output\_node\_group\_id) | The node group id |
+| [node\_group\_max\_size](#output\_node\_group\_max\_size) | The maximum size of the node group |
+| [node\_group\_min\_size](#output\_node\_group\_min\_size) | The minimum size of the node group |
+| [node\_group\_name](#output\_node\_group\_name) | The node group name |
+| [node\_group\_vpc\_zone\_identifier](#output\_node\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
+| [node\_schedule\_arns](#output\_node\_schedule\_arns) | ARNs of autoscaling group schedules |
+| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group |
+| [security\_group\_id](#output\_security\_group\_id) | ID of the security group |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 3d1c4a22db..817ea7b623 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -24,7 +24,7 @@ resource "aws_launch_template" "this" {
description = coalesce(var.description, "Custom launch template for ${var.name} self managed node group")
ebs_optimized = var.ebs_optimized
- image_id = var.image_id
+ image_id = coalesce(var.image_id, data.aws_ami.eks_default.image_id)
instance_type = var.instance_type
key_name = var.key_name
user_data = var.user_data
@@ -211,7 +211,7 @@ resource "aws_launch_template" "this" {
}
################################################################################
-# Self Managed Node Group (Autoscaling Group)
+# Node Group
################################################################################
locals {
diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf
index 876a114034..7196dc3ebf 100644
--- a/modules/self-managed-node-group/outputs.tf
+++ b/modules/self-managed-node-group/outputs.tf
@@ -18,79 +18,122 @@ output "launch_template_latest_version" {
}
################################################################################
-# Autoscaling group
+# Node Group
################################################################################
-output "autoscaling_group_id" {
- description = "The autoscaling group id"
- value = try(aws_autoscaling_group.this[0].id, "")
+output "node_group_arn" {
+ description = "The ARN for this node group"
+ value = try(aws_autoscaling_group.this[0].arn, "")
}
-output "autoscaling_group_name" {
- description = "The autoscaling group name"
- value = try(aws_autoscaling_group.this[0].name, "")
+output "node_group_id" {
+ description = "The node group id"
+  value       = try(aws_autoscaling_group.this[0].id, "")
}
-output "autoscaling_group_arn" {
- description = "The ARN for this AutoScaling Group"
- value = try(aws_autoscaling_group.this[0].arn, "")
+output "node_group_name" {
+ description = "The node group name"
+ value = try(aws_autoscaling_group.this[0].name, "")
}
-output "autoscaling_group_min_size" {
- description = "The minimum size of the autoscale group"
+output "node_group_min_size" {
+ description = "The minimum size of the node group"
value = try(aws_autoscaling_group.this[0].min_size, "")
}
-output "autoscaling_group_max_size" {
- description = "The maximum size of the autoscale group"
+output "node_group_max_size" {
+ description = "The maximum size of the node group"
value = try(aws_autoscaling_group.this[0].max_size, "")
}
-output "autoscaling_group_desired_capacity" {
+output "node_group_desired_capacity" {
description = "The number of Amazon EC2 instances that should be running in the group"
value = try(aws_autoscaling_group.this[0].desired_capacity, "")
}
-output "autoscaling_group_default_cooldown" {
+output "node_group_default_cooldown" {
description = "Time between a scaling activity and the succeeding scaling activity"
value = try(aws_autoscaling_group.this[0].default_cooldown, "")
}
-output "autoscaling_group_health_check_grace_period" {
+output "node_group_health_check_grace_period" {
description = "Time after instance comes into service before checking health"
value = try(aws_autoscaling_group.this[0].health_check_grace_period, "")
}
-output "autoscaling_group_health_check_type" {
+output "node_group_health_check_type" {
description = "EC2 or ELB. Controls how health checking is done"
value = try(aws_autoscaling_group.this[0].health_check_type, "")
}
-output "autoscaling_group_availability_zones" {
- description = "The availability zones of the autoscale group"
+output "node_group_availability_zones" {
+ description = "The availability zones of the node group"
value = try(aws_autoscaling_group.this[0].availability_zones, "")
}
-output "autoscaling_group_vpc_zone_identifier" {
+output "node_group_vpc_zone_identifier" {
description = "The VPC zone identifier"
value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "")
}
-output "autoscaling_group_load_balancers" {
- description = "The load balancer names associated with the autoscaling group"
- value = try(aws_autoscaling_group.this[0].load_balancers, "")
+################################################################################
+# Node group schedule
+################################################################################
+
+output "node_schedule_arns" {
+ description = "ARNs of autoscaling group schedules"
+ value = { for k, v in aws_autoscaling_schedule.this : k => v.arn }
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the security group"
+ value = try(aws_security_group.this[0].arn, "")
+}
+
+output "security_group_id" {
+ description = "ID of the security group"
+ value = try(aws_security_group.this[0].id, "")
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "iam_role_name" {
+ description = "The name of the IAM role"
+ value = try(aws_iam_role.this[0].name, "")
}
-output "autoscaling_group_target_group_arns" {
- description = "List of Target Group ARNs that apply to this AutoScaling Group"
- value = try(aws_autoscaling_group.this[0].target_group_arns, "")
+output "iam_role_arn" {
+ description = "The Amazon Resource Name (ARN) specifying the IAM role"
+ value = try(aws_iam_role.this[0].arn, "")
+}
+
+output "iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = try(aws_iam_role.this[0].unique_id, "")
}
################################################################################
-# Autoscaling group schedule
+# IAM Instance Profile
################################################################################
-output "autoscaling_schedule_arns" {
- description = "ARNs of autoscaling group schedules"
- value = { for k, v in aws_autoscaling_schedule.this : k => v.arn }
+output "iam_instance_profile_arn" {
+ description = "ARN assigned by AWS to the instance profile"
+ value = try(aws_iam_instance_profile.this[0].arn, "")
+}
+
+output "iam_instance_profile_id" {
+ description = "Instance profile's ID"
+ value = try(aws_iam_instance_profile.this[0].id, "")
+}
+
+output "iam_instance_profile_unique" {
+ description = "Stable and unique string identifying the IAM instance profile"
+ value = try(aws_iam_instance_profile.this[0].unique_id, "")
}
diff --git a/outputs.tf b/outputs.tf
index cbc78528d8..b19ff74c8d 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,52 +1,87 @@
-output "cluster_id" {
- description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready."
- value = try(aws_eks_cluster.this[0].id, "")
-}
+################################################################################
+# Cluster
+################################################################################
output "cluster_arn" {
- description = "The Amazon Resource Name (ARN) of the cluster."
+ description = "The Amazon Resource Name (ARN) of the cluster"
value = try(aws_eks_cluster.this[0].arn, "")
}
output "cluster_certificate_authority_data" {
- description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster."
+ description = "Base64 encoded certificate data required to communicate with the cluster"
value = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
}
output "cluster_endpoint" {
- description = "The endpoint for your EKS Kubernetes API."
+ description = "Endpoint for your Kubernetes API server"
value = try(aws_eks_cluster.this[0].endpoint, "")
}
-output "cluster_version" {
- description = "The Kubernetes server version for the EKS cluster."
- value = try(aws_eks_cluster.this[0].version, "")
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = try(aws_eks_cluster.this[0].id, "")
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "")
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = try(aws_eks_cluster.this[0].platform_version, "")
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = try(aws_eks_cluster.this[0].status, "")
}
output "cluster_security_group_id" {
- description = "Security group ID attached to the EKS cluster. On 1.14 or later, this is the 'Additional security groups' in the EKS console."
- value = local.cluster_security_group_id
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+ value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = try(aws_security_group.this[0].arn, "")
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, "")
}
+################################################################################
+# IAM Role
+################################################################################
+
output "cluster_iam_role_name" {
- description = "IAM role name of the EKS cluster."
+ description = "IAM role name of the EKS cluster"
value = try(aws_iam_role.cluster[0].name, "")
}
output "cluster_iam_role_arn" {
- description = "IAM role ARN of the EKS cluster."
+ description = "IAM role ARN of the EKS cluster"
value = try(aws_iam_role.cluster[0].arn, "")
}
-output "cluster_oidc_issuer_url" {
- description = "The URL on the EKS cluster OIDC Issuer"
- value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "")
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = try(aws_iam_role.cluster[0].unique_id, "")
}
-output "cluster_primary_security_group_id" {
- description = "The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console."
- value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
-}
+################################################################################
+# CloudWatch Log Group
+################################################################################
output "cloudwatch_log_group_name" {
description = "Name of cloudwatch log group created"
@@ -58,18 +93,17 @@ output "cloudwatch_log_group_arn" {
value = try(aws_cloudwatch_log_group.this[0].arn, "")
}
-output "oidc_provider_arn" {
- description = "The ARN of the OIDC Provider if `enable_irsa = true`."
- value = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, "")
-}
+################################################################################
+# Fargate Profile
+################################################################################
output "fargate_profile_ids" {
- description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)."
+ description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)"
value = module.fargate.fargate_profile_ids
}
output "fargate_profile_arns" {
- description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles."
+ description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles"
value = module.fargate.fargate_profile_arns
}
@@ -83,17 +117,20 @@ output "fargate_iam_role_arn" {
value = module.fargate.iam_role_arn
}
-# output "security_group_rule_cluster_https_worker_ingress" {
-# description = "Security group rule responsible for allowing pods to communicate with the EKS cluster API."
-# value = aws_security_group_rule.cluster_https_worker_ingress
-# }
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
- value = module.self_managed_node_groups
+ value = module.self_managed_node_groups
}
-
-output "eks_managed_node_groups" {
- description = "Map of attribute maps for all EKS managed node groups created"
- value = module.eks_managed_node_groups
-}
\ No newline at end of file
From bcd73a377e59e7d9e26f5b485fc0b6b1b57ae72f Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 16:40:07 -0500
Subject: [PATCH 23/83] chore: update fargate profile sub-module
---
README.md | 18 +--
examples/bottlerocket/README.md | 3 +-
examples/bottlerocket/main.tf | 25 ++--
examples/complete/README.md | 2 +-
examples/complete/main.tf | 9 +-
examples/fargate/README.md | 3 +-
examples/fargate/main.tf | 108 +++++++++---------
examples/fargate/outputs.tf | 8 +-
examples/instance_refresh/README.md | 1 +
examples/instance_refresh/main.tf | 18 +--
examples/irsa/README.md | 2 +-
examples/irsa/irsa.tf | 2 +-
modules/fargate-profile/README.md | 78 +++++++++++++
modules/fargate-profile/main.tf | 79 +++++++++++++
modules/fargate-profile/outputs.tf | 37 ++++++
modules/fargate-profile/variables.tf | 97 ++++++++++++++++
.../{fargate => fargate-profile}/versions.tf | 0
modules/fargate/README.md | 70 ------------
modules/fargate/main.tf | 56 ---------
modules/fargate/outputs.tf | 19 ---
modules/fargate/variables.tf | 53 ---------
modules/self-managed-node-group/README.md | 28 ++---
modules/self-managed-node-group/main.tf | 2 +-
modules/self-managed-node-group/outputs.tf | 43 ++++---
modules/self-managed-node-group/variables.tf | 12 +-
outputs.tf | 25 +---
variables.tf | 44 +------
workers.tf | 33 +++---
28 files changed, 458 insertions(+), 417 deletions(-)
create mode 100644 modules/fargate-profile/README.md
create mode 100644 modules/fargate-profile/main.tf
create mode 100644 modules/fargate-profile/outputs.tf
create mode 100644 modules/fargate-profile/variables.tf
rename modules/{fargate => fargate-profile}/versions.tf (100%)
delete mode 100644 modules/fargate/README.md
delete mode 100644 modules/fargate/main.tf
delete mode 100644 modules/fargate/outputs.tf
delete mode 100644 modules/fargate/variables.tf
diff --git a/README.md b/README.md
index 26d4c3621b..5c76f96e97 100644
--- a/README.md
+++ b/README.md
@@ -130,8 +130,8 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| Name | Source | Version |
|------|--------|---------|
-| [eks\_managed\_node\_groups](#module\_eks\_managed\_node\_groups) | ./modules/eks-managed-node-group | n/a |
-| [fargate](#module\_fargate) | ./modules/fargate | n/a |
+| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a |
+| [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a |
| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
## Resources
@@ -185,16 +185,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
-| [create\_fargate](#input\_create\_fargate) | Determines whether Fargate resources are created | `bool` | `false` | no |
-| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created | `bool` | `true` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
-| [fargate\_iam\_role\_path](#input\_fargate\_iam\_role\_path) | Fargate IAM role path | `string` | `null` | no |
-| [fargate\_iam\_role\_permissions\_boundary](#input\_fargate\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the Fargate role | `string` | `null` | no |
-| [fargate\_pod\_execution\_role\_arn](#input\_fargate\_pod\_execution\_role\_arn) | Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false` | `string` | `null` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in Fargate submodule's README.md for more details | `any` | `{}` | no |
-| [fargate\_subnet\_ids](#input\_fargate\_subnet\_ids) | A list of subnet IDs to place Fargate workers within (if different from `subnet_ids`) | `list(string)` | `[]` | no |
-| [fargate\_tags](#input\_fargate\_tags) | A map of additional tags to add to the Fargate resources created | `map(string)` | `{}` | no |
+| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
@@ -220,10 +213,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
-| [fargate\_iam\_role\_arn](#output\_fargate\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
-| [fargate\_iam\_role\_name](#output\_fargate\_iam\_role\_name) | IAM role name for EKS Fargate pods |
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles |
-| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:) |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index 2642df93ed..39845a0f46 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -32,6 +32,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
+| [random](#provider\_random) | n/a |
| [tls](#provider\_tls) | >= 2.2.0 |
## Modules
@@ -45,8 +46,8 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
-| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_key_pair.nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index 8dcd569ace..85d5b0054d 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -24,14 +24,13 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnet_ids = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnet_ids = [module.vpc.private_subnets[2]]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- worker_groups = {
+ self_managed_node_groups = {
one = {
name = "bottlerocket-nodes"
ami_id = data.aws_ami.bottlerocket_ami.id
@@ -60,12 +59,13 @@ module "eks" {
tags = local.tags
}
-# SSM policy for bottlerocket control container access
-# https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#enabling-ssm
-resource "aws_iam_role_policy_attachment" "ssm" {
- role = module.eks.worker_iam_role_name
- policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
-}
+# TODO
+# # SSM policy for bottlerocket control container access
+# # https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#enabling-ssm
+# resource "aws_iam_role_policy_attachment" "ssm" {
+# role = module.eks.worker_iam_role_name
+# policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+# }
################################################################################
# Supporting Resources
@@ -96,6 +96,11 @@ resource "aws_key_pair" "nodes" {
data "aws_availability_zones" "available" {}
+resource "random_string" "suffix" {
+ length = 8
+ special = false
+}
+
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 2c12b42244..003faa07dc 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -37,7 +37,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Source | Version |
|------|--------|---------|
| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
-| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | n/a |
+| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate-profile | n/a |
| [eks](#module\_eks) | ../.. | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index c10a4e374e..f649eecc86 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -24,9 +24,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnet_ids = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnet_ids = [module.vpc.private_subnets[2]]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
@@ -126,9 +125,9 @@ module "disabled_eks" {
}
module "disabled_fargate" {
- source = "../../modules/fargate"
+ source = "../../modules/fargate-profile"
- create_fargate_pod_execution_role = false
+ create = false
}
################################################################################
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
index 80cf92d880..0404ea4d36 100644
--- a/examples/fargate/README.md
+++ b/examples/fargate/README.md
@@ -36,7 +36,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Source | Version |
|------|--------|---------|
| [eks](#module\_eks) | ../.. | n/a |
-| [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | n/a |
+| [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate-profile | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
@@ -55,5 +55,4 @@ No inputs.
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Outputs from node groups |
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
index 0ab24bebc6..22eae259a0 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate/main.tf
@@ -24,9 +24,8 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
- vpc_id = module.vpc.vpc_id
- subnet_ids = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
- fargate_subnet_ids = [module.vpc.private_subnets[2]]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
@@ -111,59 +110,60 @@ module "eks" {
##############################################
module "fargate_profile_existing_cluster" {
- source = "../../modules/fargate"
+ source = "../../modules/fargate-profile"
cluster_name = module.eks.cluster_id
- subnets_ids = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
-
- fargate_profiles = {
- profile1 = {
- name = "profile1"
- selectors = [
- {
- namespace = "kube-system"
- labels = {
- k8s-app = "kube-dns"
- }
- },
- {
- namespace = "profile"
- labels = {
- WorkerType = "fargate"
- }
- }
- ]
-
- tags = {
- Owner = "profile1"
- submodule = "true"
- }
- }
-
- profile2 = {
- name = "profile2"
- selectors = [
- {
- namespace = "default"
- labels = {
- Fargate = "profile2"
- }
- }
- ]
-
- # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
- subnet_ids = [module.vpc.private_subnets[0]]
-
- tags = {
- Owner = "profile2"
- submodule = "true"
- }
-
- timeouts = {
- delete = "20m"
- }
- }
- }
+ subnet_ids = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
+
+ create = false # TODO
+ # fargate_profiles = {
+ # profile1 = {
+ # name = "profile1"
+ # selectors = [
+ # {
+ # namespace = "kube-system"
+ # labels = {
+ # k8s-app = "kube-dns"
+ # }
+ # },
+ # {
+ # namespace = "profile"
+ # labels = {
+ # WorkerType = "fargate"
+ # }
+ # }
+ # ]
+
+ # tags = {
+ # Owner = "profile1"
+ # submodule = "true"
+ # }
+ # }
+
+ # profile2 = {
+ # name = "profile2"
+ # selectors = [
+ # {
+ # namespace = "default"
+ # labels = {
+ # Fargate = "profile2"
+ # }
+ # }
+ # ]
+
+ # # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
+ # subnet_ids = [module.vpc.private_subnets[0]]
+
+ # tags = {
+ # Owner = "profile2"
+ # submodule = "true"
+ # }
+
+ # timeouts = {
+ # delete = "20m"
+ # }
+ # }
+ # }
tags = local.tags
}
diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf
index 4d4e2ecaff..1bc56942a5 100644
--- a/examples/fargate/outputs.tf
+++ b/examples/fargate/outputs.tf
@@ -8,7 +8,7 @@ output "cluster_security_group_id" {
value = module.eks.cluster_security_group_id
}
-output "fargate_profile_arns" {
- description = "Outputs from node groups"
- value = module.eks.fargate_profile_arns
-}
+# output "fargate_profile_arn" {
+# description = "Outputs from node groups"
+# value = module.eks.fargate_profile_arn
+# }
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
index 4d47ee900c..131734e2f1 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/instance_refresh/README.md
@@ -54,6 +54,7 @@ Note that this example may create resources which cost money. Run `terraform des
| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
index 9abf6190c0..13b6cef46a 100644
--- a/examples/instance_refresh/main.tf
+++ b/examples/instance_refresh/main.tf
@@ -20,10 +20,14 @@ locals {
# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup
+data "aws_eks_cluster_auth" "cluster" {
+ name = module.eks.cluster_id
+}
+
provider "helm" {
kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.cluster.token
}
}
@@ -47,7 +51,7 @@ data "aws_iam_policy_document" "aws_node_termination_handler" {
actions = [
"autoscaling:CompleteLifecycleAction",
]
- resources = module.eks.workers_asg_arns
+ resources = [for k, v in module.eks.self_managed_node_groups : v.autoscaling_group_arn]
}
statement {
effect = "Allow"
@@ -104,7 +108,7 @@ resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
"detail-type" : [
"EC2 Instance-terminate Lifecycle Action"
]
- "resources" : module.eks.workers_asg_arns
+ "resources" : [for k, v in module.eks.self_managed_node_groups : v.autoscaling_group_arn]
}
)
}
@@ -126,7 +130,7 @@ resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
"detail-type" : [
"EC2 Spot Instance Interruption Warning"
]
- "resources" : module.eks.workers_asg_arns
+ "resources" : [for k, v in module.eks.self_managed_node_groups : v.autoscaling_group_arn]
}
)
}
@@ -195,9 +199,9 @@ resource "helm_release" "aws_node_termination_handler" {
# ensures that node termination does not require the lifecycle action to be completed,
# and thus allows the ASG to be destroyed cleanly.
resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
- count = length(module.eks.workers_asg_names)
+ for_each = module.eks.self_managed_node_groups
name = "aws-node-termination-handler"
- autoscaling_group_name = module.eks.workers_asg_names[count.index]
+ autoscaling_group_name = each.value.autoscaling_group_id
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
heartbeat_timeout = 300
default_result = "CONTINUE"
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
index 0299ef4ca6..d2a071fcd5 100644
--- a/examples/irsa/README.md
+++ b/examples/irsa/README.md
@@ -45,7 +45,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
index 03ec6f28dc..9ea074d1f2 100644
--- a/examples/irsa/irsa.tf
+++ b/examples/irsa/irsa.tf
@@ -17,7 +17,7 @@ provider "helm" {
}
}
-resource "helm_release" "cluster-autoscaler" {
+resource "helm_release" "cluster_autoscaler" {
depends_on = [
module.eks
]
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
new file mode 100644
index 0000000000..ee7a01de57
--- /dev/null
+++ b/modules/fargate-profile/README.md
@@ -0,0 +1,78 @@
+# EKS `fargate-profile` submodule
+
+Helper submodule to create and manage resources related to `aws_eks_fargate_profile`.
+
+## `fargate_profile` keys
+
+`fargate_profile` is a map of maps. The first-level key is used as the unique value for `for_each` resources and in the `aws_eks_fargate_profile` name. The inner map accepts the values below.
+
+## Example
+
+See example code in `examples/fargate`.
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| name | Fargate profile name | `string` | Auto generated in the following format `[cluster_name]-fargate-[fargate_profile_map_key]` | no |
+| selectors | A list of Kubernetes selectors. See examples/fargate/main.tf for example format. | `any` | `[]` | no |
+| subnets | List of subnet IDs. Will replace the root module subnets. | `list(string)` | `var.subnets` | no |
+| timeouts | A map of timeouts for create/delete operations. | `map(string)` | Provider default behavior | no |
+| tags | Key-value map of resource tags. Will be merged with root module tags. | `map(string)` | `var.tags` | no |
+
+
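+A minimal usage sketch, assuming a local module path and placeholder cluster/subnet values:
+
+```hcl
+module "fargate_profile" {
+  source = "./modules/fargate-profile"
+
+  cluster_name         = "my-cluster"
+  fargate_profile_name = "default"
+  subnet_ids           = ["subnet-abcde012", "subnet-bcde012a"]
+
+  # One `selector` block is rendered per entry; `labels` is optional
+  selectors = [
+    {
+      namespace = "default"
+      labels = {
+        WorkerType = "fargate"
+      }
+    }
+  ]
+
+  tags = {
+    Environment = "dev"
+  }
+}
+```
+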
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.56.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Controls if the IAM Role that provides permissions for the EKS Fargate Profile will be created | `bool` | `true` | no |
+| [fargate\_profile\_name](#input\_fargate\_profile\_name) | Name of the EKS Fargate Profile | `string` | `null` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of an existing IAM role that provides permissions for the Fargate pod executions | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| [selectors](#input\_selectors) | Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile | `any` | `[]` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate Profile | `list(string)` | `[]` | no |
+| [timeouts](#input\_timeouts) | Create and delete timeout configurations for the Fargate Profile | `map(string)` | `{}` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [fargate\_profile\_arn](#output\_fargate\_profile\_arn) | Amazon Resource Name (ARN) of the EKS Fargate Profile |
+| [fargate\_profile\_id](#output\_fargate\_profile\_id) | EKS Cluster name and EKS Fargate Profile name separated by a colon (`:`) |
+| [fargate\_profile\_status](#output\_fargate\_profile\_status) | Status of the EKS Fargate Profile |
+| [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
+| [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
+| [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+
diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf
new file mode 100644
index 0000000000..3750c44952
--- /dev/null
+++ b/modules/fargate-profile/main.tf
@@ -0,0 +1,79 @@
+data "aws_partition" "current" {}
+
+locals {
+ iam_role_name = coalesce(var.iam_role_name, var.fargate_profile_name)
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+data "aws_iam_policy_document" "assume_role_policy" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
+ statement {
+ effect = "Allow"
+ actions = ["sts:AssumeRole"]
+
+ principals {
+ type = "Service"
+ identifiers = ["eks-fargate-pods.amazonaws.com"]
+ }
+ }
+}
+
+resource "aws_iam_role" "this" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
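+  # Exactly one of `name`/`name_prefix` is set, driven by `iam_role_use_name_prefix`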
+ name = var.iam_role_use_name_prefix ? null : local.iam_role_name
+ name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+ path = var.iam_role_path
+
+ assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
+ permissions_boundary = var.iam_role_permissions_boundary
+ force_detach_policies = true
+
+ tags = var.tags
+}
+
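+# Attach the AWS managed `AmazonEKSFargatePodExecutionRolePolicy` plus any
+# extra policies supplied via `iam_role_additional_policies`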
+resource "aws_iam_role_policy_attachment" "this" {
+ for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
+ "${local.policy_arn_prefix}/AmazonEKSFargatePodExecutionRolePolicy",
+ ], var.iam_role_additional_policies)))) : toset([])
+
+ policy_arn = each.value
+ role = aws_iam_role.this[0].name
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+resource "aws_eks_fargate_profile" "this" {
+  count = var.create ? 1 : 0
+
+ cluster_name = var.cluster_name
+ fargate_profile_name = var.fargate_profile_name
+ pod_execution_role_arn = var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn
+ subnet_ids = var.subnet_ids
+
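+  # Render one `selector` block per entry in `var.selectors`; `labels` is optional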
+ dynamic "selector" {
+ for_each = var.selectors
+
+ content {
+ namespace = selector.value.namespace
+ labels = lookup(selector.value, "labels", {})
+ }
+ }
+
+ dynamic "timeouts" {
+ for_each = [var.timeouts]
+ content {
+ create = lookup(var.timeouts, "create", null)
+ delete = lookup(var.timeouts, "delete", null)
+ }
+ }
+
+ tags = var.tags
+}
diff --git a/modules/fargate-profile/outputs.tf b/modules/fargate-profile/outputs.tf
new file mode 100644
index 0000000000..bb6e023248
--- /dev/null
+++ b/modules/fargate-profile/outputs.tf
@@ -0,0 +1,37 @@
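+# All outputs use try(..., "") so they resolve to empty strings when `create = false`
+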
+################################################################################
+# IAM Role
+################################################################################
+
+output "iam_role_name" {
+ description = "The name of the IAM role"
+ value = try(aws_iam_role.this[0].name, "")
+}
+
+output "iam_role_arn" {
+ description = "The Amazon Resource Name (ARN) specifying the IAM role"
+ value = try(aws_iam_role.this[0].arn, "")
+}
+
+output "iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = try(aws_iam_role.this[0].unique_id, "")
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profile_arn" {
+ description = "Amazon Resource Name (ARN) of the EKS Fargate Profile"
+ value = try(aws_eks_fargate_profile.this[0].arn, "")
+}
+
+output "fargate_profile_id" {
+ description = "EKS Cluster name and EKS Fargate Profile name separated by a colon (`:`)"
+ value = try(aws_eks_fargate_profile.this[0].id, "")
+}
+
+output "fargate_profile_status" {
+ description = "Status of the EKS Fargate Profile"
+ value = try(aws_eks_fargate_profile.this[0].status, "")
+}
diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf
new file mode 100644
index 0000000000..13557e0ca9
--- /dev/null
+++ b/modules/fargate-profile/variables.tf
@@ -0,0 +1,97 @@
+variable "create" {
+ description = "Controls if Fargate resources should be created (it affects all resources)"
+ type = bool
+ default = true
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+variable "create_iam_role" {
+ description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile will be created"
+ type = bool
+ default = true
+}
+
+variable "iam_role_arn" {
+ description = "Amazon Resource Name (ARN) of an existing IAM role that provides permissions for the Fargate pod executions"
+ type = string
+ default = null
+}
+
+variable "iam_role_name" {
+ description = "Name to use on IAM role created"
+ type = string
+ default = null
+}
+
+variable "iam_role_use_name_prefix" {
+ description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
+  type        = bool
+ default = true
+}
+
+variable "iam_role_path" {
+ description = "IAM role path"
+ type = string
+ default = null
+}
+
+variable "iam_role_permissions_boundary" {
+ description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+ type = string
+ default = null
+}
+
+variable "iam_role_additional_policies" {
+ description = "Additional policies to be added to the IAM role"
+ type = list(string)
+ default = []
+}
+
+variable "iam_role_tags" {
+ description = "A map of additional tags to add to the IAM role created"
+ type = map(string)
+ default = {}
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+variable "cluster_name" {
+ description = "Name of the EKS cluster"
+ type = string
+ default = null
+}
+
+variable "fargate_profile_name" {
+ description = "Name of the EKS Fargate Profile"
+ type = string
+ default = null
+}
+
+variable "subnet_ids" {
+ description = "A list of subnet IDs for the EKS Fargate Profile"
+ type = list(string)
+ default = []
+}
+
+variable "selectors" {
+ description = "Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile"
+  type        = any
+  default     = []
+}
+
+variable "timeouts" {
+ description = "Create and delete timeout configurations for the Fargate Profile"
+ type = map(string)
+ default = {}
+}
diff --git a/modules/fargate/versions.tf b/modules/fargate-profile/versions.tf
similarity index 100%
rename from modules/fargate/versions.tf
rename to modules/fargate-profile/versions.tf
diff --git a/modules/fargate/README.md b/modules/fargate/README.md
deleted file mode 100644
index 27f3c258b8..0000000000
--- a/modules/fargate/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# EKS `fargate` submodule
-
-Helper submodule to create and manage resources related to `aws_eks_fargate_profile`.
-
-## `fargate_profile` keys
-
-`fargate_profile` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_fargate_profile` name. Inner map can take the below values.
-
-## Example
-
-See example code in `examples/fargate`.
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| name | Fargate profile name | `string` | Auto generated in the following format `[cluster_name]-fargate-[fargate_profile_map_key]` | no |
-| selectors | A list of Kubernetes selectors. See examples/fargate/main.tf for example format. | `any` | `[]` | no |
-| subnets | List of subnet IDs. Will replace the root module subnets. | `list(string)` | `var.subnets` | no |
-| timeouts | A map of timeouts for create/delete operations. | `map(string)` | Provider default behavior | no |
-| tags | Key-value map of resource tags. Will be merged with root module tags. | `map(string)` | `var.tags` | no |
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource |
-| [aws_iam_role.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_policy_document.eks_fargate_pod_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
-| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
-| [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created | `bool` | `true` | no |
-| [fargate\_pod\_execution\_role\_arn](#input\_fargate\_pod\_execution\_role\_arn) | Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false` | `string` | `null` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no |
-| [iam\_path](#input\_iam\_path) | Path to the role | `string` | `null` | no |
-| [permissions\_boundary](#input\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the role | `string` | `null` | no |
-| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate profiles | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| [iam\_role\_arn](#output\_iam\_role\_arn) | ARN of IAM role of the EKS Fargate pods |
-| [iam\_role\_name](#output\_iam\_role\_name) | Name of IAM role created for EKS Fargate pods |
-
diff --git a/modules/fargate/main.tf b/modules/fargate/main.tf
deleted file mode 100644
index 565a0d93e8..0000000000
--- a/modules/fargate/main.tf
+++ /dev/null
@@ -1,56 +0,0 @@
-data "aws_partition" "current" {}
-
-data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
- count = var.create && var.create_fargate_pod_execution_role ? 1 : 0
-
- statement {
- effect = "Allow"
- actions = ["sts:AssumeRole"]
-
- principals {
- type = "Service"
- identifiers = ["eks-fargate-pods.amazonaws.com"]
- }
- }
-}
-
-resource "aws_iam_role" "eks_fargate_pod" {
- count = var.create && var.create_fargate_pod_execution_role ? 1 : 0
-
- name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24))
- path = var.iam_path
-
- assume_role_policy = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json
- permissions_boundary = var.permissions_boundary
- managed_policy_arns = [
- "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy",
- ]
- force_detach_policies = true
-
- tags = var.tags
-}
-
-resource "aws_eks_fargate_profile" "this" {
- for_each = var.create ? var.fargate_profiles : {}
-
- cluster_name = var.cluster_name
- fargate_profile_name = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-")))
- pod_execution_role_arn = var.create_fargate_pod_execution_role ? aws_iam_role.eks_fargate_pod[0].arn : var.fargate_pod_execution_role_arn
- subnet_ids = lookup(each.value, "subnet_ids", var.subnet_ids)
-
- dynamic "selector" {
- for_each = each.value.selectors
-
- content {
- namespace = selector.value["namespace"]
- labels = lookup(selector.value, "labels", {})
- }
- }
-
- timeouts {
- create = try(each.value["timeouts"].create, null)
- delete = try(each.value["timeouts"].delete, null)
- }
-
- tags = merge(var.tags, lookup(each.value, "tags", {}))
-}
diff --git a/modules/fargate/outputs.tf b/modules/fargate/outputs.tf
deleted file mode 100644
index 7ecfeebf47..0000000000
--- a/modules/fargate/outputs.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-output "fargate_profile_ids" {
- description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)."
- value = [for f in aws_eks_fargate_profile.this : f.id]
-}
-
-output "fargate_profile_arns" {
- description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles."
- value = [for f in aws_eks_fargate_profile.this : f.arn]
-}
-
-output "iam_role_name" {
- description = "Name of IAM role created for EKS Fargate pods"
- value = try(aws_iam_role.eks_fargate_pod[0].name, "")
-}
-
-output "iam_role_arn" {
- description = "ARN of IAM role of the EKS Fargate pods"
- value = try(aws_iam_role.eks_fargate_pod[0].arn, var.fargate_pod_execution_role_arn, "")
-}
diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf
deleted file mode 100644
index 86fefbb39d..0000000000
--- a/modules/fargate/variables.tf
+++ /dev/null
@@ -1,53 +0,0 @@
-variable "create" {
- description = "Controls if Fargate resources should be created (it affects all resources)"
- type = bool
- default = true
-}
-
-variable "create_fargate_pod_execution_role" {
- description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created"
- type = bool
- default = true
-}
-
-variable "cluster_name" {
- description = "Name of the EKS cluster"
- type = string
- default = ""
-}
-
-variable "iam_path" {
- description = "Path to the role"
- type = string
- default = null
-}
-
-variable "permissions_boundary" {
- description = "ARN of the policy that is used to set the permissions boundary for the role"
- type = string
- default = null
-}
-
-variable "fargate_profiles" {
- description = "Fargate profiles to create. See `fargate_profile` keys section in README.md for more details"
- type = any
- default = {}
-}
-
-variable "fargate_pod_execution_role_arn" {
- description = "Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false`"
- type = string
- default = null
-}
-
-variable "subnet_ids" {
- description = "A list of subnet IDs for the EKS Fargate profiles"
- type = list(string)
- default = []
-}
-
-variable "tags" {
- description = "A map of tags to add to all resources"
- type = map(string)
- default = {}
-}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index c637b1b898..d811b30a3b 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -74,7 +74,7 @@ No modules.
| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
-| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | The IAM Instance Profile ARN to launch the instance with | `string` | `null` | no |
+| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group | `string` | `null` | no |
| [iam\_instance\_profile\_name](#input\_iam\_instance\_profile\_name) | The name attribute of the IAM instance profile to associate with launched instances | `string` | `null` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
@@ -82,7 +82,7 @@ No modules.
| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
-| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the EKS Node Group | `string` | `true` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
| [image\_id](#input\_image\_id) | The AMI from which to launch the instance | `string` | `null` | no |
| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
@@ -136,6 +136,18 @@ No modules.
| Name | Description |
|------|-------------|
+| [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this autoscaling group |
+| [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscaling group |
+| [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
+| [autoscaling\_group\_desired\_capacity](#output\_autoscaling\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
+| [autoscaling\_group\_health\_check\_grace\_period](#output\_autoscaling\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
+| [autoscaling\_group\_health\_check\_type](#output\_autoscaling\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
+| [autoscaling\_group\_id](#output\_autoscaling\_group\_id) | The autoscaling group id |
+| [autoscaling\_group\_max\_size](#output\_autoscaling\_group\_max\_size) | The maximum size of the autoscaling group |
+| [autoscaling\_group\_min\_size](#output\_autoscaling\_group\_min\_size) | The minimum size of the autoscaling group |
+| [autoscaling\_group\_name](#output\_autoscaling\_group\_name) | The autoscaling group name |
+| [autoscaling\_group\_schedule\_arns](#output\_autoscaling\_group\_schedule\_arns) | ARNs of autoscaling group schedules |
+| [autoscaling\_group\_vpc\_zone\_identifier](#output\_autoscaling\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
| [iam\_instance\_profile\_arn](#output\_iam\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile |
| [iam\_instance\_profile\_id](#output\_iam\_instance\_profile\_id) | Instance profile's ID |
| [iam\_instance\_profile\_unique](#output\_iam\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile |
@@ -145,18 +157,6 @@ No modules.
| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
-| [node\_group\_arn](#output\_node\_group\_arn) | The ARN for this node group |
-| [node\_group\_availability\_zones](#output\_node\_group\_availability\_zones) | The availability zones of the node group |
-| [node\_group\_default\_cooldown](#output\_node\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
-| [node\_group\_desired\_capacity](#output\_node\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
-| [node\_group\_health\_check\_grace\_period](#output\_node\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
-| [node\_group\_health\_check\_type](#output\_node\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
-| [node\_group\_id](#output\_node\_group\_id) | The node group id |
-| [node\_group\_max\_size](#output\_node\_group\_max\_size) | The maximum size of the node group |
-| [node\_group\_min\_size](#output\_node\_group\_min\_size) | The minimum size of the node group |
-| [node\_group\_name](#output\_node\_group\_name) | The node group name |
-| [node\_group\_vpc\_zone\_identifier](#output\_node\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
-| [node\_schedule\_arns](#output\_node\_schedule\_arns) | ARNs of autoscaling group schedules |
| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group |
| [security\_group\_id](#output\_security\_group\_id) | ID of the security group |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 817ea7b623..431912344d 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -24,7 +24,7 @@ resource "aws_launch_template" "this" {
description = coalesce(var.description, "Custom launch template for ${var.name} self managed node group")
ebs_optimized = var.ebs_optimized
- image_id = coalesce(var.image_id, data.aws_ami.eks_default.image_id)
+ image_id = coalesce(var.image_id, data.aws_ami.eks_default[0].image_id)
instance_type = var.instance_type
key_name = var.key_name
user_data = var.user_data
diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf
index 7196dc3ebf..2187eb9661 100644
--- a/modules/self-managed-node-group/outputs.tf
+++ b/modules/self-managed-node-group/outputs.tf
@@ -18,74 +18,73 @@ output "launch_template_latest_version" {
}
################################################################################
-# Node Group
+# Autoscaling Group
################################################################################
-output "node_group_arn" {
- description = "The ARN for this node group"
+output "autoscaling_group_arn" {
+ description = "The ARN for this autoscaling group"
value = try(aws_autoscaling_group.this[0].arn, "")
}
-output "node_group_id" {
- description = "The node group id"
- value = try(aws_node_group.this[0].id, "")
+output "autoscaling_group_id" {
+ description = "The autoscaling group id"
+ value = try(aws_autoscaling_group.this[0].id, "")
}
-output "node_group_name" {
- description = "The node group name"
+output "autoscaling_group_name" {
+ description = "The autoscaling group name"
value = try(aws_autoscaling_group.this[0].name, "")
}
-output "node_group_min_size" {
- description = "The minimum size of the node group"
+output "autoscaling_group_min_size" {
+ description = "The minimum size of the autoscaling group"
value = try(aws_autoscaling_group.this[0].min_size, "")
}
-output "node_group_max_size" {
- description = "The maximum size of the node group"
+output "autoscaling_group_max_size" {
+ description = "The maximum size of the autoscaling group"
value = try(aws_autoscaling_group.this[0].max_size, "")
}
-output "node_group_desired_capacity" {
+output "autoscaling_group_desired_capacity" {
description = "The number of Amazon EC2 instances that should be running in the group"
value = try(aws_autoscaling_group.this[0].desired_capacity, "")
}
-output "node_group_default_cooldown" {
+output "autoscaling_group_default_cooldown" {
description = "Time between a scaling activity and the succeeding scaling activity"
value = try(aws_autoscaling_group.this[0].default_cooldown, "")
}
-output "node_group_health_check_grace_period" {
+output "autoscaling_group_health_check_grace_period" {
description = "Time after instance comes into service before checking health"
value = try(aws_autoscaling_group.this[0].health_check_grace_period, "")
}
-output "node_group_health_check_type" {
+output "autoscaling_group_health_check_type" {
description = "EC2 or ELB. Controls how health checking is done"
value = try(aws_autoscaling_group.this[0].health_check_type, "")
}
-output "node_group_availability_zones" {
- description = "The availability zones of the node group"
+output "autoscaling_group_availability_zones" {
+ description = "The availability zones of the autoscaling group"
value = try(aws_autoscaling_group.this[0].availability_zones, "")
}
-output "node_group_vpc_zone_identifier" {
+output "autoscaling_group_vpc_zone_identifier" {
description = "The VPC zone identifier"
value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "")
}
################################################################################
-# Node group schedule
+# Autoscaling Group Schedule
################################################################################
-output "node_schedule_arns" {
+output "autoscaling_group_schedule_arns" {
description = "ARNs of autoscaling group schedules"
value = { for k, v in aws_autoscaling_schedule.this : k => v.arn }
}
-
################################################################################
# Security Group
################################################################################
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index cf92b55dc5..e7e9c9c7f2 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -204,6 +204,7 @@ variable "cluster_name" {
type = string
default = null
}
+
variable "tags" {
description = "A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws_autoscaling_group requires."
type = map(string)
@@ -352,7 +353,6 @@ variable "tag_specifications" {
default = []
}
-
variable "ebs_optimized" {
description = "If true, the launched EC2 instance will be EBS-optimized"
type = bool
@@ -491,20 +491,20 @@ variable "create_iam_instance_profile" {
default = true
}
-variable "iam_role_name" {
- description = "Name to use on IAM role created"
+variable "iam_instance_profile_arn" {
+ description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group"
type = string
default = null
}
-variable "iam_instance_profile_arn" {
- description = "The IAM Instance Profile ARN to launch the instance with"
+variable "iam_role_name" {
+ description = "Name to use on IAM role created"
type = string
default = null
}
variable "iam_role_use_name_prefix" {
- description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the EKS Node Group"
+ description = "Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix"
type = string
default = true
}
diff --git a/outputs.tf b/outputs.tf
index b19ff74c8d..d754efc99d 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -97,24 +97,9 @@ output "cloudwatch_log_group_arn" {
# Fargate Profile
################################################################################
-output "fargate_profile_ids" {
- description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)"
- value = module.fargate.fargate_profile_ids
-}
-
-output "fargate_profile_arns" {
- description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles"
- value = module.fargate.fargate_profile_arns
-}
-
-output "fargate_iam_role_name" {
- description = "IAM role name for EKS Fargate pods"
- value = module.fargate.iam_role_name
-}
-
-output "fargate_iam_role_arn" {
- description = "IAM role ARN for EKS Fargate pods"
- value = module.fargate.iam_role_arn
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.fargate_profile
}
################################################################################
@@ -123,7 +108,7 @@ output "fargate_iam_role_arn" {
output "eks_managed_node_groups" {
description = "Map of attribute maps for all EKS managed node groups created"
- value = module.eks_managed_node_groups
+ value = module.eks_managed_node_group
}
################################################################################
@@ -132,5 +117,5 @@ output "eks_managed_node_groups" {
output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
- value = module.self_managed_node_groups
+ value = module.self_managed_node_group
}
diff --git a/variables.tf b/variables.tf
index 50145f1a6e..040bc9232b 100644
--- a/variables.tf
+++ b/variables.tf
@@ -226,54 +226,12 @@ variable "cluster_iam_role_tags" {
# Fargate
################################################################################
-variable "create_fargate" {
- description = "Determines whether Fargate resources are created"
- type = bool
- default = false
-}
-
-variable "create_fargate_pod_execution_role" {
- description = "Controls if the EKS Fargate pod execution IAM role should be created"
- type = bool
- default = true
-}
-
-variable "fargate_pod_execution_role_arn" {
- description = "Existing Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. Required if `create_fargate_pod_execution_role` is `false`"
- type = string
- default = null
-}
-
-variable "fargate_subnet_ids" {
- description = "A list of subnet IDs to place Fargate workers within (if different from `subnet_ids`)"
- type = list(string)
- default = []
-}
-
-variable "fargate_iam_role_path" {
- description = "Fargate IAM role path"
- type = string
- default = null
-}
-
-variable "fargate_iam_role_permissions_boundary" {
- description = "ARN of the policy that is used to set the permissions boundary for the Fargate role"
- type = string
- default = null
-}
-
variable "fargate_profiles" {
- description = "Fargate profiles to create. See `fargate_profile` keys section in Fargate submodule's README.md for more details"
+ description = "Map of Fargate Profile definitions to create"
type = any
default = {}
}
-variable "fargate_tags" {
- description = "A map of additional tags to add to the Fargate resources created"
- type = map(string)
- default = {}
-}
-
################################################################################
# Self Managed Node Group
################################################################################
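With the standalone Fargate variables removed, per-profile settings now live inside each `fargate_profiles` entry and are picked up by `try()` lookups in `workers.tf`. A sketch under those assumptions (the key, subnet ID, and selector shape are illustrative):

```hcl
# Each map entry becomes one fargate-profile submodule instance; any key
# omitted here falls through to the defaults in the try() lookups.
fargate_profiles = {
  default = {
    fargate_profile_name = "default"
    selectors = [
      { namespace = "default" } # selector shape assumed per the submodule
    ]

    # Per-profile overrides replacing the removed module-wide variables
    subnet_ids    = ["subnet-abcde012"] # illustrative ID
    iam_role_path = "/fargate/"
    tags          = { Profile = "default" }
  }
}
```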
diff --git a/workers.tf b/workers.tf
index 72fc9af944..8cd02d6350 100644
--- a/workers.tf
+++ b/workers.tf
@@ -2,29 +2,36 @@
# Fargate
################################################################################
-module "fargate" {
- source = "./modules/fargate"
+module "fargate_profile" {
+ source = "./modules/fargate-profile"
- create = var.create_fargate
- create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
- fargate_pod_execution_role_arn = var.fargate_pod_execution_role_arn
+ for_each = var.create ? var.fargate_profiles : {}
- cluster_name = try(aws_eks_cluster.this[0].name, var.cluster_name) # TODO - why?!
- subnet_ids = coalescelist(var.fargate_subnet_ids, var.subnet_ids, [""])
+ # Fargate Profile
+ cluster_name = aws_eks_cluster.this[0].name
+  fargate_profile_name = try(each.value.fargate_profile_name, each.key)
+ subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
+ selectors = try(each.value.selectors, {})
+ timeouts = try(each.value.timeouts, {})
- iam_path = var.fargate_iam_role_path
- permissions_boundary = var.fargate_iam_role_permissions_boundary
-
- fargate_profiles = var.fargate_profiles
+ # IAM role
+ create_iam_role = try(each.value.create_iam_role, true)
+ iam_role_arn = try(each.value.iam_role_arn, null)
+ iam_role_name = try(each.value.iam_role_name, null)
+ iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
+ iam_role_path = try(each.value.iam_role_path, null)
+ iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
+ iam_role_tags = try(each.value.iam_role_tags, {})
+ iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
- tags = merge(var.tags, var.fargate_tags)
+ tags = merge(var.tags, try(each.value.tags, {}))
}
################################################################################
# EKS Managed Node Group
################################################################################
-module "eks_managed_node_groups" {
+module "eks_managed_node_group" {
source = "./modules/eks-managed-node-group"
for_each = var.create ? var.eks_managed_node_groups : {}
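The rewrite leans on `try()` for these per-entry overrides: the first argument that evaluates without an error wins, so a key present on the map entry beats the module-wide default. A minimal, standalone illustration of that precedence:

```hcl
# try() precedence as used in the module blocks above: present keys win,
# absent keys fall through to the next argument.
locals {
  profile = { iam_role_name = "custom-role" }

  role_name = try(local.profile.iam_role_name, null) # => "custom-role"
  role_path = try(local.profile.iam_role_path, null) # => null (key absent)
}
```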
From 8caca6bf674e42ce61351a02a4ed9868e8de767b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 18:35:11 -0500
Subject: [PATCH 24/83] chore: fix static check errors, replace release process
with current standard, misc clean-up
---
.chglog/CHANGELOG.tpl.md | 75 ---
.chglog/config.yml | 54 --
.github/ISSUE_TEMPLATE.md | 30 -
.github/PULL_REQUEST_TEMPLATE.md | 9 -
.github/semantic.yml | 30 -
.github/workflows/pre-commit.yml | 103 ++-
.github/workflows/release.yml | 32 +
.releaserc.json | 35 +
CHANGELOG.md | 14 -
Makefile | 17 -
{.github => docs}/CHANGELOG.pre-v11.0.0.md | 0
docs/{upgrades.md => UPGRADE-17.0.md} | 0
docs/faq.md | 39 --
docs/iam-permissions.md | 155 -----
docs/spot-instances.md | 4 +-
examples/bottlerocket/README.md | 3 +-
examples/bottlerocket/versions.tf | 4 +
examples/complete/main.tf | 2 +-
.../eks_managed_node_group/launchtemplate.tf | 3 -
examples/eks_managed_node_group/main.tf | 2 +-
main.tf | 10 +-
modules/eks-managed-node-group/README.md | 3 -
modules/eks-managed-node-group/main.tf | 1 -
modules/eks-managed-node-group/variables.tf | 18 -
modules/fargate-profile/README.md | 5 +-
modules/fargate-profile/main.tf | 2 +-
modules/self-managed-node-group/README.md | 2 -
modules/self-managed-node-group/variables.tf | 12 -
templates/default.sh.tpl | 56 --
templates/linux_user_data.sh.tpl | 3 -
templates/windows_user_data.tpl | 6 +-
tools/semtag | 627 ------------------
workers.tf | 3 -
33 files changed, 127 insertions(+), 1232 deletions(-)
delete mode 100644 .chglog/CHANGELOG.tpl.md
delete mode 100644 .chglog/config.yml
delete mode 100644 .github/ISSUE_TEMPLATE.md
delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md
delete mode 100644 .github/semantic.yml
create mode 100644 .github/workflows/release.yml
create mode 100644 .releaserc.json
delete mode 100644 Makefile
rename {.github => docs}/CHANGELOG.pre-v11.0.0.md (100%)
rename docs/{upgrades.md => UPGRADE-17.0.md} (100%)
delete mode 100644 docs/iam-permissions.md
delete mode 100644 templates/default.sh.tpl
delete mode 100755 tools/semtag
diff --git a/.chglog/CHANGELOG.tpl.md b/.chglog/CHANGELOG.tpl.md
deleted file mode 100644
index 1af11b8438..0000000000
--- a/.chglog/CHANGELOG.tpl.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Change Log
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
-project adheres to [Semantic Versioning](http://semver.org/).
-
-{{ if .Versions -}}
-
-## [Unreleased]
-{{ if .Unreleased.CommitGroups -}}
-{{ range .Unreleased.CommitGroups -}}
-{{ .Title }}:
-{{ range .Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
-{{ end -}}
-{{ end }}
-{{ end -}}
-{{ else }}
-{{ range .Unreleased.Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst}}
-{{ end -}}
-{{ end }}
-{{ end -}}
-
-{{- if .Unreleased.NoteGroups -}}
-{{ range .Unreleased.NoteGroups -}}
-{{ .Title }}:
-{{ range .Notes -}}
-- {{ .Body }}
-{{ end }}
-{{ end -}}
-{{ end -}}
-{{ end -}}
-
-{{ range .Versions }}
-
-## {{ if .Tag.Previous }}[{{ .Tag.Name }}]{{ else }}{{ .Tag.Name }}{{ end }} - {{ datetime "2006-01-02" .Tag.Date }}
-{{ if .CommitGroups -}}
-{{ range .CommitGroups -}}
-{{ .Title }}:
-{{ range .Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
-{{ end -}}
-{{ end }}
-{{ end -}}
-{{ else }}
-{{ range .Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
-{{ end -}}
-{{ end }}
-{{ end -}}
-
-{{- if .NoteGroups -}}
-{{ range .NoteGroups -}}
-{{ .Title }}:
-{{ range .Notes -}}
-- {{ .Body }}
-{{ end }}
-{{ end -}}
-{{ end -}}
-{{ end -}}
-
-{{- if .Versions }}
-[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
-{{ range .Versions -}}
-{{ if .Tag.Previous -}}
-[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
-{{ end -}}
-{{ end -}}
-{{ end -}}
diff --git a/.chglog/config.yml b/.chglog/config.yml
deleted file mode 100644
index 06f2cfb375..0000000000
--- a/.chglog/config.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-style: github
-template: CHANGELOG.tpl.md
-info:
- title: CHANGELOG
- repository_url: https://github.com/terraform-aws-modules/terraform-aws-eks
-options:
- commits:
- sort_by: Type
- filters:
- Type:
- - feat
- - fix
- - improvement
- - docs
- - refactor
- - test
- - ci
-
- commit_groups:
- group_by: Type
- sort_by: Custom
- title_order:
- - feat
- - improvement
- - refactor
- - fix
- - docs
- - test
- - ci
- title_maps:
- feat: FEATURES
- fix: BUG FIXES
- improvement: ENHANCEMENTS
- docs: DOCS
- refactor: REFACTORS
- test: TESTS
- ci: CI
-
- header:
- pattern: "^(.+)\\s*:\\s*(.+)$"
- pattern_maps:
- - Type
- - Subject
-
- notes:
- keywords:
- - BREAKING CHANGES
- - NOTES
-
- refs:
- actions:
- - Closes
- - Fixes
- - Resolves
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index bcb7188d94..0000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# I have issues
-
-## I'm submitting a...
-
-* [ ] bug report
-* [ ] feature request
-* [ ] support request - read the [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md) first!
-* [ ] kudos, thank you, warm fuzzy
-
-## What is the current behavior?
-
-
-
-## If this is a bug, how to reproduce? Please include a code sample if relevant.
-
-
-
-## What's the expected behavior?
-
-
-
-## Are you able to fix this problem and submit a PR? Link here if you have already.
-
-## Environment details
-
-* Affected module version:
-* OS:
-* Terraform version:
-
-## Any other relevant info
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index ce48f0220a..0000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# PR o'clock
-
-## Description
-
-Please explain the changes you made here and link to any relevant issues.
-
-### Checklist
-
-- [ ] README.md has been updated after any changes to variables and outputs. See https://github.com/terraform-aws-modules/terraform-aws-eks/#doc-generation
diff --git a/.github/semantic.yml b/.github/semantic.yml
deleted file mode 100644
index 376c06a6a8..0000000000
--- a/.github/semantic.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Always validate the PR title, and ignore the commits
-titleOnly: true
-
-# Always validate all commits, and ignore the PR title
-commitsOnly: false
-
-# Always validate the PR title AND all the commits
-titleAndCommits: false
-
-# Require at least one commit to be valid
-# this is only relevant when using commitsOnly: true or titleAndCommits: true,
-# which validate all commits by default
-anyCommit: false
-
-# By default types specified in commitizen/conventional-commit-types is used.
-# See: https://github.com/commitizen/conventional-commit-types/blob/v2.3.0/index.json
-# You can override the valid types
-types:
- - feat
- - fix
- - improvement
- - docs
- - refactor
- - test
- - ci
- - chore
-
-# Allow use of Merge commits (eg on github: "Merge branch 'master' into feature/ride-unicorns")
-# this is only relevant when using commitsOnly: true (or titleAndCommits: true)
-allowMergeCommits: false
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index 5a3c0ce7e1..b8f1b8a5ab 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -2,98 +2,77 @@ name: Pre-Commit
on:
pull_request:
- push:
branches:
+ - main
- master
+env:
+ TERRAFORM_DOCS_VERSION: v0.16.0
+
jobs:
- # Min Terraform version(s)
- getDirectories:
- name: Get root directories
+ collectInputs:
+ name: Collect workflow inputs
runs-on: ubuntu-latest
+ outputs:
+ directories: ${{ steps.dirs.outputs.directories }}
steps:
- name: Checkout
uses: actions/checkout@v2
- - name: Install Python
- uses: actions/setup-python@v2
- - name: Build matrix
- id: matrix
- run: |
- DIRS=$(python -c "import json; import glob; print(json.dumps([x.replace('/versions.tf', '') for x in glob.glob('./**/versions.tf', recursive=True)]))")
- echo "::set-output name=directories::$DIRS"
- outputs:
- directories: ${{ steps.matrix.outputs.directories }}
+
+ - name: Get root directories
+ id: dirs
+ uses: clowdhaus/terraform-composite-actions/directories@v1.3.0
preCommitMinVersions:
- name: Min TF validate
- needs: getDirectories
+ name: Min TF pre-commit
+ needs: collectInputs
runs-on: ubuntu-latest
strategy:
matrix:
- directory: ${{ fromJson(needs.getDirectories.outputs.directories) }}
+ directory: ${{ fromJson(needs.collectInputs.outputs.directories) }}
steps:
- name: Checkout
uses: actions/checkout@v2
- - name: Install Python
- uses: actions/setup-python@v2
+
- name: Terraform min/max versions
id: minMax
- uses: clowdhaus/terraform-min-max@v1.0.2
+ uses: clowdhaus/terraform-min-max@v1.0.3
with:
directory: ${{ matrix.directory }}
- - name: Install Terraform v${{ steps.minMax.outputs.minVersion }}
- uses: hashicorp/setup-terraform@v1
- with:
- terraform_version: ${{ steps.minMax.outputs.minVersion }}
- - name: Install pre-commit dependencies
- run: pip install pre-commit
- - name: Execute pre-commit
+
+ - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }}
# Run only validate pre-commit check on min version supported
if: ${{ matrix.directory != '.' }}
- run: pre-commit run terraform_validate --color=always --show-diff-on-failure --files ${{ matrix.directory }}/*
- - name: Execute pre-commit
+ uses: clowdhaus/terraform-composite-actions/pre-commit@v1.3.0
+ with:
+ terraform-version: ${{ steps.minMax.outputs.minVersion }}
+ args: 'terraform_validate --color=always --show-diff-on-failure --files ${{ matrix.directory }}/*'
+
+ - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }}
# Run only validate pre-commit check on min version supported
if: ${{ matrix.directory == '.' }}
- run: pre-commit run terraform_validate --color=always --show-diff-on-failure --files $(ls *.tf)
+ uses: clowdhaus/terraform-composite-actions/pre-commit@v1.3.0
+ with:
+ terraform-version: ${{ steps.minMax.outputs.minVersion }}
+ args: 'terraform_validate --color=always --show-diff-on-failure --files $(ls *.tf)'
- # Max Terraform version
- getBaseVersion:
- name: Module max TF version
+ preCommitMaxVersion:
+ name: Max TF pre-commit
runs-on: ubuntu-latest
+ needs: collectInputs
steps:
- name: Checkout
uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.ref }}
+ repository: ${{github.event.pull_request.head.repo.full_name}}
+
- name: Terraform min/max versions
id: minMax
- uses: clowdhaus/terraform-min-max@v1.0.2
- outputs:
- minVersion: ${{ steps.minMax.outputs.minVersion }}
- maxVersion: ${{ steps.minMax.outputs.maxVersion }}
+ uses: clowdhaus/terraform-min-max@v1.0.3
- preCommitMaxVersion:
- name: Max TF pre-commit
- runs-on: ubuntu-latest
- needs: getBaseVersion
- strategy:
- fail-fast: false
- matrix:
- version:
- - ${{ needs.getBaseVersion.outputs.maxVersion }}
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- - name: Install Python
- uses: actions/setup-python@v2
- - name: Install Terraform v${{ matrix.version }}
- uses: hashicorp/setup-terraform@v1
+ - name: Pre-commit Terraform ${{ steps.minMax.outputs.maxVersion }}
+ uses: clowdhaus/terraform-composite-actions/pre-commit@v1.3.0
with:
- terraform_version: ${{ matrix.version }}
- - name: Install pre-commit dependencies
- run: |
- pip install pre-commit
- curl -Lo ./terraform-docs.tar.gz https://github.com/terraform-docs/terraform-docs/releases/download/v0.13.0/terraform-docs-v0.13.0-$(uname)-amd64.tar.gz && tar -xzf terraform-docs.tar.gz terraform-docs && chmod +x terraform-docs && sudo mv terraform-docs /usr/bin/
- curl -L "$(curl -s https://api.github.com/repos/terraform-linters/tflint/releases/latest | grep -o -E "https://.+?_linux_amd64.zip")" > tflint.zip && unzip tflint.zip tflint && rm tflint.zip && sudo mv tflint /usr/bin/
- - name: Execute pre-commit
- # Run all pre-commit checks on max version supported
- if: ${{ matrix.version == needs.getBaseVersion.outputs.maxVersion }}
- run: pre-commit run --color=always --show-diff-on-failure --all-files
+ terraform-version: ${{ steps.minMax.outputs.maxVersion }}
+ terraform-docs-version: ${{ env.TERRAFORM_DOCS_VERSION }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000..141937d863
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,32 @@
+name: Release
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+ - master
+ paths:
+ - '**/*.py'
+ - '**/*.tf'
+
+jobs:
+ release:
+ name: Release
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ fetch-depth: 0
+
+ - name: Release
+ uses: cycjimmy/semantic-release-action@v2
+ with:
+ semantic_version: 18.0.0
+ extra_plugins: |
+ @semantic-release/changelog@6.0.0
+ @semantic-release/git@10.0.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.SEMANTIC_RELEASE_TOKEN }}
diff --git a/.releaserc.json b/.releaserc.json
new file mode 100644
index 0000000000..2c82c6160d
--- /dev/null
+++ b/.releaserc.json
@@ -0,0 +1,35 @@
+{
+ "branches": [
+ "main",
+ "master"
+ ],
+ "ci": false,
+ "plugins": [
+ "@semantic-release/commit-analyzer",
+ "@semantic-release/release-notes-generator",
+ [
+ "@semantic-release/github",
+ {
+ "successComment": "This ${issue.pull_request ? 'PR is included' : 'issue has been resolved'} in version [${nextRelease.version}](${releaseInfos[0].url}) :tada:",
+ "labels": false,
+ "releasedLabels": false
+ }
+ ],
+ [
+ "@semantic-release/changelog",
+ {
+ "changelogFile": "CHANGELOG.md",
+ "changelogTitle": "# Changelog\n\nAll notable changes to this project will be documented in this file."
+ }
+ ],
+ [
+ "@semantic-release/git",
+ {
+ "assets": [
+ "CHANGELOG.md"
+ ],
+ "message": "chore(release): version ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
+ }
+ ]
+ ]
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93228a9cd8..2153e79f74 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,17 +1,3 @@
-# Change Log
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
-project adheres to [Semantic Versioning](http://semver.org/).
-
-For changes prior to v11.0.0, see [CHANGELOG.pre-v11.0.0.md](.github/CHANGELOG.pre-v11.0.0.md)
-
-
-## [Unreleased]
-
-
-
## [v17.23.0] - 2021-11-02
FEATURES:
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 5120cc143a..0000000000
--- a/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-.PHONY: changelog release
-
-SEMTAG=tools/semtag
-
-CHANGELOG_FILE=CHANGELOG.md
-TAG_QUERY=v11.0.0..
-
-scope ?= "minor"
-
-changelog-unrelease:
- git-chglog --no-case -o $(CHANGELOG_FILE) $(TAG_QUERY)
-
-changelog:
- git-chglog --no-case -o $(CHANGELOG_FILE) --next-tag `$(SEMTAG) final -s $(scope) -o -f` $(TAG_QUERY)
-
-release:
- $(SEMTAG) final -s $(scope)
diff --git a/.github/CHANGELOG.pre-v11.0.0.md b/docs/CHANGELOG.pre-v11.0.0.md
similarity index 100%
rename from .github/CHANGELOG.pre-v11.0.0.md
rename to docs/CHANGELOG.pre-v11.0.0.md
diff --git a/docs/upgrades.md b/docs/UPGRADE-17.0.md
similarity index 100%
rename from docs/upgrades.md
rename to docs/UPGRADE-17.0.md
diff --git a/docs/faq.md b/docs/faq.md
index 7f3dc718d6..ed9fa69cd1 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -43,12 +43,6 @@ You need to add the tags to the VPC and subnets yourself. See the [basic example
An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore_tags-configuration-block). However, this can also cause Terraform to display a perpetual difference.
-## How do I safely remove old worker groups?
-
-You've added new worker groups. Deleting worker groups from earlier in the list causes Terraform to want to recreate all worker groups. This is a limitation with how Terraform works and the module using `count` to create the ASGs and other resources.
-
-The safest and easiest option is to set `asg_min_size` and `asg_max_size` to 0 on the worker groups to "remove".
-
## Why does changing the node or worker group's desired count not do anything?
The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block.
@@ -85,31 +79,6 @@ You are using the cluster autoscaler:
You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
-## How do I create kubernetes resources when creating the cluster?
-
-You do not need to do anything extra since v12.1.0 of the module as long as the following conditions are met:
-
-- `manage_aws_auth = true` on the module (default)
-- the kubernetes provider is correctly configured like in the [Usage Example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/README.md#usage-example). Primarily the module's `cluster_id` output is used as input to the `aws_eks_cluster*` data sources.
-
-The `cluster_id` depends on a `data.http.wait_for_cluster` that polls the EKS cluster's endpoint until it is alive. This blocks initialisation of the kubernetes provider.
-
-## `aws_auth.tf: At 2:14: Unknown token: 2:14 IDENT`
-
-You are attempting to use a Terraform 0.12 module with Terraform 0.11.
-
-We highly recommend that you upgrade your EKS Terraform config to 0.12 to take advantage of new features in the module.
-
-Alternatively you can lock your module to a compatible version if you must stay with terraform 0.11:
-
-```hcl
-module "eks" {
- source = "terraform-aws-modules/eks/aws"
- version = "~> 4.0"
- # ...
-}
-```
-
## How can I use Windows workers?
To enable Windows support for your EKS cluster, you need to apply some configuration manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support) documentation.
@@ -163,14 +132,6 @@ Older configurations used labels like `kubernetes.io/lifecycle=spot` and this is
Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
-## What is the difference between `node_groups` and `worker_groups`?
-
-`node_groups` are [AWS-managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) (configures "Node Groups" that you can find on the EKS dashboard). This system is supposed to ease some of the lifecycle around upgrading nodes. Although they do not do this automatically and you still need to manually trigger the updates.
-
-`worker_groups` are [self-managed nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) (provisions a typical "Autoscaling group" on EC2). It gives you full control over nodes in the cluster like using custom AMI for the nodes. As AWS says, "with worker groups the customer controls the data plane & AWS controls the control plane".
-
-Both can be used together in the same cluster.
-
## I'm using both AWS-managed node groups and self-managed worker groups, and pods scheduled on the AWS-managed node groups are unable to resolve DNS (even communication between pods fails)
This happens because CoreDNS can be scheduled on the self-managed worker groups and, by default, the Terraform module doesn't create security group rules to ensure communication between pods scheduled on the self-managed worker groups and the AWS-managed node groups.
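A hedged sketch of the kind of rule that restores that traffic so CoreDNS is reachable regardless of where it is scheduled; the module output names are assumptions for illustration:

```hcl
# Hypothetical rule: allow all traffic from the cluster's primary security
# group (used by the managed node groups) to the self-managed workers.
resource "aws_security_group_rule" "workers_ingress_from_managed" {
  description              = "Managed node group pods to self-managed workers (incl. CoreDNS)"
  type                     = "ingress"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 0
  security_group_id        = module.eks.worker_security_group_id          # assumed output
  source_security_group_id = module.eks.cluster_primary_security_group_id # assumed output
}
```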
diff --git a/docs/iam-permissions.md b/docs/iam-permissions.md
deleted file mode 100644
index e6a867a9ec..0000000000
--- a/docs/iam-permissions.md
+++ /dev/null
@@ -1,155 +0,0 @@
-# IAM Permissions
-
-Following IAM permissions are the minimum permissions needed for your IAM user or IAM role to create an EKS cluster.
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "VisualEditor0",
- "Effect": "Allow",
- "Action": [
- "autoscaling:AttachInstances",
- "autoscaling:CreateAutoScalingGroup",
- "autoscaling:CreateLaunchConfiguration",
- "autoscaling:CreateOrUpdateTags",
- "autoscaling:DeleteAutoScalingGroup",
- "autoscaling:DeleteLaunchConfiguration",
- "autoscaling:DeleteTags",
- "autoscaling:Describe*",
- "autoscaling:DetachInstances",
- "autoscaling:SetDesiredCapacity",
- "autoscaling:UpdateAutoScalingGroup",
- "autoscaling:SuspendProcesses",
- "ec2:AllocateAddress",
- "ec2:AssignPrivateIpAddresses",
- "ec2:Associate*",
- "ec2:AttachInternetGateway",
- "ec2:AttachNetworkInterface",
- "ec2:AuthorizeSecurityGroupEgress",
- "ec2:AuthorizeSecurityGroupIngress",
- "ec2:CreateDefaultSubnet",
- "ec2:CreateDhcpOptions",
- "ec2:CreateEgressOnlyInternetGateway",
- "ec2:CreateInternetGateway",
- "ec2:CreateNatGateway",
- "ec2:CreateNetworkInterface",
- "ec2:CreateRoute",
- "ec2:CreateRouteTable",
- "ec2:CreateSecurityGroup",
- "ec2:CreateSubnet",
- "ec2:CreateTags",
- "ec2:CreateVolume",
- "ec2:CreateVpc",
- "ec2:CreateVpcEndpoint",
- "ec2:DeleteDhcpOptions",
- "ec2:DeleteEgressOnlyInternetGateway",
- "ec2:DeleteInternetGateway",
- "ec2:DeleteNatGateway",
- "ec2:DeleteNetworkInterface",
- "ec2:DeleteRoute",
- "ec2:DeleteRouteTable",
- "ec2:DeleteSecurityGroup",
- "ec2:DeleteSubnet",
- "ec2:DeleteTags",
- "ec2:DeleteVolume",
- "ec2:DeleteVpc",
- "ec2:DeleteVpnGateway",
- "ec2:Describe*",
- "ec2:DetachInternetGateway",
- "ec2:DetachNetworkInterface",
- "ec2:DetachVolume",
- "ec2:Disassociate*",
- "ec2:ModifySubnetAttribute",
- "ec2:ModifyVpcAttribute",
- "ec2:ModifyVpcEndpoint",
- "ec2:ReleaseAddress",
- "ec2:RevokeSecurityGroupEgress",
- "ec2:RevokeSecurityGroupIngress",
- "ec2:UpdateSecurityGroupRuleDescriptionsEgress",
- "ec2:UpdateSecurityGroupRuleDescriptionsIngress",
- "ec2:CreateLaunchTemplate",
- "ec2:CreateLaunchTemplateVersion",
- "ec2:DeleteLaunchTemplate",
- "ec2:DeleteLaunchTemplateVersions",
- "ec2:DescribeLaunchTemplates",
- "ec2:DescribeLaunchTemplateVersions",
- "ec2:GetLaunchTemplateData",
- "ec2:ModifyLaunchTemplate",
- "ec2:RunInstances",
- "eks:CreateCluster",
- "eks:DeleteCluster",
- "eks:DescribeCluster",
- "eks:ListClusters",
- "eks:UpdateClusterConfig",
- "eks:UpdateClusterVersion",
- "eks:DescribeUpdate",
- "eks:TagResource",
- "eks:UntagResource",
- "eks:ListTagsForResource",
- "eks:CreateFargateProfile",
- "eks:DeleteFargateProfile",
- "eks:DescribeFargateProfile",
- "eks:ListFargateProfiles",
- "eks:CreateNodegroup",
- "eks:DeleteNodegroup",
- "eks:DescribeNodegroup",
- "eks:ListNodegroups",
- "eks:UpdateNodegroupConfig",
- "eks:UpdateNodegroupVersion",
- "iam:AddRoleToInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:CreateInstanceProfile",
- "iam:CreateOpenIDConnectProvider",
- "iam:CreateServiceLinkedRole",
- "iam:CreatePolicy",
- "iam:CreatePolicyVersion",
- "iam:CreateRole",
- "iam:DeleteInstanceProfile",
- "iam:DeleteOpenIDConnectProvider",
- "iam:DeletePolicy",
- "iam:DeletePolicyVersion",
- "iam:DeleteRole",
- "iam:DeleteRolePolicy",
- "iam:DeleteServiceLinkedRole",
- "iam:DetachRolePolicy",
- "iam:GetInstanceProfile",
- "iam:GetOpenIDConnectProvider",
- "iam:GetPolicy",
- "iam:GetPolicyVersion",
- "iam:GetRole",
- "iam:GetRolePolicy",
- "iam:List*",
- "iam:PassRole",
- "iam:PutRolePolicy",
- "iam:RemoveRoleFromInstanceProfile",
- "iam:TagOpenIDConnectProvider",
- "iam:TagRole",
- "iam:UntagRole",
- "iam:TagPolicy",
- "iam:TagInstanceProfile",
- "iam:UpdateAssumeRolePolicy",
- // Following permissions are needed if cluster_enabled_log_types is enabled
- "logs:CreateLogGroup",
- "logs:DescribeLogGroups",
- "logs:DeleteLogGroup",
- "logs:ListTagsLogGroup",
- "logs:PutRetentionPolicy",
- // Following permissions for working with secrets_encryption example
- "kms:CreateAlias",
- "kms:CreateGrant",
- "kms:CreateKey",
- "kms:DeleteAlias",
- "kms:DescribeKey",
- "kms:GetKeyPolicy",
- "kms:GetKeyRotationStatus",
- "kms:ListAliases",
- "kms:ListResourceTags",
- "kms:ScheduleKeyDeletion"
- ],
- "Resource": "*"
- }
- ]
-}
-```
diff --git a/docs/spot-instances.md b/docs/spot-instances.md
index 739d2d923b..d9be09eaa7 100644
--- a/docs/spot-instances.md
+++ b/docs/spot-instances.md
@@ -34,7 +34,7 @@ Only Launch Template is supported in this module; Launch Configuration support h
spot_instance_pools = 4
asg_max_size = 5
asg_desired_capacity = 5
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
public_ip = true
},
}
@@ -61,7 +61,7 @@ Example launch template to launch 2 on demand instances of type m5.large, and ha
asg_max_size = 20
spot_instance_pools = 3
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`"
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`'"
}
}
```
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index 39845a0f46..e430d572c0 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -25,6 +25,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.56.0 |
+| [random](#requirement\_random) | >= 2.0.0 |
| [tls](#requirement\_tls) | >= 2.2.0 |
## Providers
@@ -32,7 +33,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.56.0 |
-| [random](#provider\_random) | n/a |
+| [random](#provider\_random) | >= 2.0.0 |
| [tls](#provider\_tls) | >= 2.2.0 |
## Modules
diff --git a/examples/bottlerocket/versions.tf b/examples/bottlerocket/versions.tf
index 83a000f86f..4e1b818eed 100644
--- a/examples/bottlerocket/versions.tf
+++ b/examples/bottlerocket/versions.tf
@@ -6,6 +6,10 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.56.0"
}
+ random = {
+ source = "hashicorp/random"
+ version = ">= 2.0.0"
+ }
tls = {
source = "hashicorp/tls"
version = ">= 2.2.0"
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index f649eecc86..9598e3ab9c 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -40,7 +40,7 @@ module "eks" {
spot_instance_pools = 4
asg_max_size = 5
asg_desired_capacity = 5
- kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
public_ip = true
vpc_security_group_ids = [aws_security_group.additional.id] # TODO
diff --git a/examples/eks_managed_node_group/launchtemplate.tf b/examples/eks_managed_node_group/launchtemplate.tf
index cd28e89061..48a38a6e5b 100644
--- a/examples/eks_managed_node_group/launchtemplate.tf
+++ b/examples/eks_managed_node_group/launchtemplate.tf
@@ -5,9 +5,6 @@
# cluster_name = local.name
# endpoint = module.eks.cluster_endpoint
# cluster_auth_base64 = module.eks.cluster_certificate_authority_data
-#
-# bootstrap_extra_args = ""
-# kubelet_extra_args = ""
# }
#}
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 4281d3bcdb..92358ee5a5 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -92,7 +92,7 @@ module "eks" {
# CONTAINER_RUNTIME = "containerd"
# USE_MAX_PODS = false
# }
- # kubelet_extra_args = "--max-pods=110"
+ # bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
# k8s_labels = {
# GithubRepo = "terraform-aws-eks"
# GithubOrg = "terraform-aws-modules"
diff --git a/main.tf b/main.tf
index 09b5264792..04e3b79d27 100644
--- a/main.tf
+++ b/main.tf
@@ -1,10 +1,3 @@
-locals {
- cluster_security_group_id = var.create_cluster_security_group ? join("", aws_security_group.this.*.id) : var.cluster_security_group_id
-
- # Worker groups
- policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
-}
-
data "aws_partition" "current" {}
################################################################################
@@ -20,7 +13,7 @@ resource "aws_eks_cluster" "this" {
enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
- security_group_ids = compact([local.cluster_security_group_id])
+    security_group_ids = compact([var.create_cluster_security_group ? aws_security_group.this[0].id : var.cluster_security_group_id])
subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
@@ -192,6 +185,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
locals {
cluster_iam_role_name = coalesce(var.cluster_iam_role_name, "${var.cluster_name}-cluster")
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
resource "aws_iam_role" "cluster" {
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index b08613947e..729617344e 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -177,7 +177,6 @@ No modules.
| [instance\_types](#input\_instance\_types) | Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]` | `list(string)` | `null` | no |
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
-| [kubelet\_extra\_args](#input\_kubelet\_extra\_args) | Additional arguments passed to the --kubelet flag | `string` | `""` | no |
| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
@@ -188,7 +187,6 @@ No modules.
| [min\_size](#input\_min\_size) | Minimum number of worker nodes | `number` | `0` | no |
| [name](#input\_name) | Name of the EKS Node Group | `string` | `null` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
-| [node\_labels](#input\_node\_labels) | Key-value map of additional labels | `map(string)` | `{}` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
@@ -200,7 +198,6 @@ No modules.
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `string` | `true` | no |
| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
-| [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(any)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
| [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `map(any)` | `{}` | no |
| [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 39f0357e65..8ba6b55881 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -42,7 +42,6 @@ data "cloudinit_config" "eks_optimized_ami_user_data" {
# Optional
cluster_dns_ip = var.cluster_dns_ip
bootstrap_extra_args = var.bootstrap_extra_args
- kubelet_extra_args = var.kubelet_extra_args
post_bootstrap_user_data = var.post_bootstrap_user_data
}
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 80fd169e72..9e0fbe1481 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -62,18 +62,6 @@ variable "bootstrap_extra_args" {
default = ""
}
-variable "kubelet_extra_args" {
- description = "Additional arguments passed to the --kubelet flag"
- type = string
- default = ""
-}
-
-variable "node_labels" {
- description = "Key-value map of additional labels"
- type = map(string)
- default = {}
-}
-
################################################################################
# Launch template
################################################################################
@@ -246,12 +234,6 @@ variable "placement" {
default = null
}
-variable "tag_specifications" {
- description = "The tags to apply to the resources during launch"
- type = list(any)
- default = []
-}
-
################################################################################
# EKS Managed Node Group
################################################################################
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index ee7a01de57..89b6dc40c2 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -50,7 +50,7 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no |
| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile will be created | `bool` | `true` | no |
| [fargate\_profile\_name](#input\_fargate\_profile\_name) | Name of the EKS Fargate Profile | `string` | `null` | no |
@@ -63,7 +63,8 @@ No modules.
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
| [selectors](#input\_selectors) | Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile | `map(string)` | `{}` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate Profile | `list(string)` | `[]` | no |
-| [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the Fargate Profile | `map(string)` | `{}` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| [timeouts](#input\_timeouts) | Create and delete timeout configurations for the Fargate Profile | `map(string)` | `{}` | no |
## Outputs
diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf
index 3750c44952..dfe5b4f34c 100644
--- a/modules/fargate-profile/main.tf
+++ b/modules/fargate-profile/main.tf
@@ -34,7 +34,7 @@ resource "aws_iam_role" "this" {
permissions_boundary = var.iam_role_permissions_boundary
force_detach_policies = true
- tags = var.tags
+ tags = merge(var.tags, var.iam_role_tags)
}
resource "aws_iam_role_policy_attachment" "this" {
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index d811b30a3b..740ed6603e 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -75,7 +75,6 @@ No modules.
| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group | `string` | `null` | no |
-| [iam\_instance\_profile\_name](#input\_iam\_instance\_profile\_name) | The name attribute of the IAM instance profile to associate with launched instances | `string` | `null` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM role. WARNING: If set to `false`, the permissions must be assigned to the `aws-node` DaemonSet pods via another method, or nodes will not be able to join the cluster | `bool` | `true` | no |
| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
@@ -118,7 +117,6 @@ No modules.
| [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside in. Conflicts with `availability_zones` | `list(string)` | `null` | no |
| [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `null` | no |
-| [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(any)` | `[]` | no |
| [tags](#input\_tags) | A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws\_autoscaling\_group requires. | `map(string)` | `{}` | no |
| [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing | `list(string)` | `[]` | no |
| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `null` | no |
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index e7e9c9c7f2..94777de1c4 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -347,24 +347,12 @@ variable "placement" {
default = null
}
-variable "tag_specifications" {
- description = "The tags to apply to the resources during launch"
- type = list(any)
- default = []
-}
-
variable "ebs_optimized" {
description = "If true, the launched EC2 instance will be EBS-optimized"
type = bool
default = null
}
-variable "iam_instance_profile_name" {
- description = "The name attribute of the IAM instance profile to associate with launched instances"
- type = string
- default = null
-}
-
variable "image_id" {
description = "The AMI from which to launch the instance"
type = string
diff --git a/templates/default.sh.tpl b/templates/default.sh.tpl
deleted file mode 100644
index f104be3786..0000000000
--- a/templates/default.sh.tpl
+++ /dev/null
@@ -1,56 +0,0 @@
-### Default user data
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-#!/bin/bash
-set -ex
-B64_CLUSTER_CA=xxx
-API_SERVER_URL=xxx
-K8S_CLUSTER_DNS_IP=172.20.0.10
-/etc/eks/bootstrap.sh \
- --kubelet-extra-args '--node-labels=
- eks.amazonaws.com/nodegroup-image=ami-0caf35bc73450c396,
- eks.amazonaws.com/capacityType=ON_DEMAND,
- eks.amazonaws.com/nodegroup=default_node_group'
- --b64-cluster-ca $B64_CLUSTER_CA
- --apiserver-endpoint $API_SERVER_URL
- --dns-cluster-ip $K8S_CLUSTER_DNS_IP
-
---//--
-
-
-### Custom launch template with user added user data
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Transfer-Encoding: 7bit
-Content-Type: text/x-shellscript
-Mime-Version: 1.0
-
-echo 'hello world!'
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-#!/bin/bash
-set -ex
-B64_CLUSTER_CA=xxx
-API_SERVER_URL=xxx
-K8S_CLUSTER_DNS_IP=172.20.0.10
-/etc/eks/bootstrap.sh --kubelet-extra-args '--node-labels=eks.amazonaws.com/sourceLaunchTemplateVersion=1,eks.amazonaws.com/nodegroup-image=ami-0caf35bc73450c396,eks.amazonaws.com/capacityType=ON_DEMAND,eks.amazonaws.com/nodegroup=create_launch_template,eks.amazonaws.com/sourceLaunchTemplateId=lt-003a9022005aa0062' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL --dns-cluster-ip $K8S_CLUSTER_DNS_IP
-
-
---//--
-
-### Custom AMI - even when using EKS AMI
-Content-Type: multipart/mixed; boundary="//"
-MIME-Version: 1.0
-
---//
-Content-Transfer-Encoding: 7bit
-Content-Type: text/x-shellscript
-Mime-Version: 1.0
-
-echo 'hello world!'
---//--
diff --git a/templates/linux_user_data.sh.tpl b/templates/linux_user_data.sh.tpl
index 8d8c758121..38e336acd9 100644
--- a/templates/linux_user_data.sh.tpl
+++ b/templates/linux_user_data.sh.tpl
@@ -1,9 +1,6 @@
#!/bin/bash -ex
/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} \
-%{ if length(kubelet_extra_args) > 0 ~}
- --kubelet-extra-args '${kubelet_extra_args}' \
-%{ endif ~}
%{ if length(cluster_dns_ip) > 0 ~}
--dns-cluster-ip ${cluster_dns_ip} \
%{ endif ~}
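With `kubelet_extra_args` dropped from the Linux template, kubelet flags now ride along inside `bootstrap_extra_args`, mirroring the doc and example updates above. A migration sketch (the group key is illustrative):

```hcl
self_managed_node_groups = {
  default = {
    # Before (input removed by this change):
    #   kubelet_extra_args = "--max-pods=110"
    # After: fold the kubelet flags into bootstrap_extra_args.
    bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
  }
}
```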
diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl
index e8856838f1..8112309235 100644
--- a/templates/windows_user_data.tpl
+++ b/templates/windows_user_data.tpl
@@ -1,11 +1,11 @@
-${pre_userdata}
+${pre_bootstrap_user_data}
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
-& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -KubeletExtraArgs "${kubelet_extra_args}" 3>&1 4>&1 5>&1 6>&1
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
-${additional_userdata}
+${post_bootstrap_user_data}
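The Windows template now consumes the same renamed hooks as Linux: `pre_bootstrap_user_data`, `bootstrap_extra_args`, and `post_bootstrap_user_data`. A sketch of a Windows group definition under those names (the key and PowerShell snippets are illustrative):

```hcl
self_managed_node_groups = {
  windows = {
    platform                = "windows"
    pre_bootstrap_user_data = "Write-Host 'before bootstrap'"
    # Passed verbatim to Start-EKSBootstrap.ps1; the flag shown is an assumption
    bootstrap_extra_args     = "-KubeletExtraArgs '--node-labels=lifecycle=on-demand'"
    post_bootstrap_user_data = "Write-Host 'after bootstrap'"
  }
}
```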
diff --git a/tools/semtag b/tools/semtag
deleted file mode 100755
index 568d4241ad..0000000000
--- a/tools/semtag
+++ /dev/null
@@ -1,627 +0,0 @@
-#!/usr/bin/env bash
-#
-# Thanks to @pnikosis for this script https://github.com/pnikosis/semtag
-#
-PROG=semtag
-PROG_VERSION="v0.1.0"
-
-SEMVER_REGEX="^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$"
-IDENTIFIER_REGEX="^\-([0-9A-Za-z-]+)\.([0-9A-Za-z-]+)*$"
-
-# Global variables
-FIRST_VERSION="v0.0.0"
-finalversion=$FIRST_VERSION
-lastversion=$FIRST_VERSION
-hasversiontag="false"
-scope="patch"
-displayonly="false"
-forcetag="false"
-forcedversion=
-versionname=
-identifier=
-
-HELP="\
-Usage:
- $PROG
- $PROG getlast
- $PROG getfinal
- $PROG (final|alpha|beta|candidate) [-s (major|minor|patch|auto) | -o]
- $PROG --help
- $PROG --version
-Options:
- -s The scope that must be increased, can be major, minor or patch.
- The resulting version will match X.Y.Z(-PRERELEASE)(+BUILD)
- where X, Y and Z are positive integers, PRERELEASE is an optionnal
- string composed of alphanumeric characters describing if the build is
- a release candidate, alpha or beta version, with a number.
- BUILD is also an optional string composed of alphanumeric
- characters and hyphens.
- Setting the scope as 'auto', the script will chose the scope between
- 'minor' and 'patch', depending on the amount of lines added (<10% will
- choose patch).
- -v Specifies manually the version to be tagged, must be a valid semantic version
- in the format X.Y.Z where X, Y and Z are positive integers.
- -o Output the version only, shows the bumped version, but doesn't tag.
- -f Forces to tag, even if there are unstaged or uncommited changes.
-Commands:
- --help Print this help message.
- --version Prints the program's version.
- get Returns both current final version and last tagged version.
- getlast Returns the latest tagged version.
- getfinal Returns the latest tagged final version.
- getcurrent Returns the current version, based on the latest one, if there are uncommited or
- unstaged changes, they will be reflected in the version, adding the number of
- pending commits, current branch and commit hash.
- final Tags the current build as a final version, this only can be done on the master branch.
- candidate Tags the current build as a release candidate, the tag will contain all
- the commits from the last final version.
- alpha Tags the current build as an alpha version, the tag will contain all
- the commits from the last final version.
- beta Tags the current build as a beta version, the tag will contain all
- the commits from the last final version."
-
-# Commands and options
-ACTION="getlast"
-ACTION="$1"
-shift
-
-# We get the parameters
-while getopts "v:s:of" opt; do
- case $opt in
- v)
- forcedversion="$OPTARG"
- ;;
- s)
- scope="$OPTARG"
- ;;
- o)
- displayonly="true"
- ;;
- f)
- forcetag="true"
- ;;
- \?)
- echo "Invalid option: -$OPTARG" >&2
- exit 1
- ;;
- :)
- echo "Option -$OPTARG requires an argument." >&2
- exit 1
- ;;
- esac
-done
-
-# Gets a string with the version and returns an array of maximum size of 5 with all the parts of the sematinc version
-# $1 The string containing the version in semantic format
-# $2 The variable to store the result array:
-# position 0: major number
-# position 1: minor number
-# position 2: patch number
-# position 3: identifier (or prerelease identifier)
-# position 4: build info
-function explode_version {
- local __version=$1
- local __result=$2
- if [[ $__version =~ $SEMVER_REGEX ]] ; then
- local __major=${BASH_REMATCH[1]}
- local __minor=${BASH_REMATCH[2]}
- local __patch=${BASH_REMATCH[3]}
- local __prere=${BASH_REMATCH[4]}
- local __build=${BASH_REMATCH[5]}
- eval "$__result=(\"$__major\" \"$__minor\" \"$__patch\" \"$__prere\" \"$__build\")"
- else
- eval "$__result="
- fi
-}
-
-# Compare two versions and returns -1, 0 or 1
-# $1 The first version to compare
-# $2 The second version to compare
-# $3 The variable where to store the result
-function compare_versions {
- local __first
- local __second
- explode_version $1 __first
- explode_version $2 __second
- local lv=$3
-
- # Compares MAJOR, MINOR and PATCH
- for i in 0 1 2; do
- local __numberfirst=${__first[$i]}
- local __numbersecond=${__second[$i]}
- case $(($__numberfirst - $__numbersecond)) in
- 0)
- ;;
- -[0-9]*)
- eval "$lv=-1"
- return 0
- ;;
- [0-9]*)
- eval "$lv=1"
- return 0
- ;;
- esac
- done
-
- # Identifiers should compare with the ASCII order.
- local __identifierfirst=${__first[3]}
- local __identifiersecond=${__second[3]}
- if [[ -n "$__identifierfirst" ]] && [[ -n "$__identifiersecond" ]]; then
- if [[ "$__identifierfirst" > "$__identifiersecond" ]]; then
- eval "$lv=1"
- return 0
- elif [[ "$__identifierfirst" < "$__identifiersecond" ]]; then
- eval "$lv=-1"
- return 0
- fi
- elif [[ -z "$__identifierfirst" ]] && [[ -n "$__identifiersecond" ]]; then
- eval "$lv=1"
- return 0
- elif [[ -n "$__identifierfirst" ]] && [[ -z "$__identifiersecond" ]]; then
- eval "$lv=-1"
- return 0
- fi
-
- eval "$lv=0"
-}
-
-# Returns the last version of two
-# $1 The first version to compare
-# $2 The second version to compare
-# $3 The variable where to store the last one
-function get_latest_of_two {
- local __first=$1
- local __second=$2
- local __result
- local __latest=$3
- compare_versions $__first $__second __result
- case $__result in
- 0)
- eval "$__latest=$__second"
- ;;
- -1)
- eval "$__latest=$__second"
- ;;
- 1)
- eval "$__latest=$__first"
- ;;
- esac
-}
-
-# Assigns a 2 size array with the identifier, having the identifier at pos 0, and the number in pos 1
-# $1 The identifier in the format -id.#
-# $2 The vferiable where to store the 2 size array
-function explode_identifier {
- local __identifier=$1
- local __result=$2
- if [[ $__identifier =~ $IDENTIFIER_REGEX ]] ; then
- local __id=${BASH_REMATCH[1]}
- local __number=${BASH_REMATCH[2]}
- if [[ -z "$__number" ]]; then
- __number=1
- fi
- eval "$__result=(\"$__id\" \"$__number\")"
- else
- eval "$__result="
- fi
-}
-
-# Gets a list of tags and assigns the base and latest versions
-# Receives an array with the tags containing the versions
-# Assigns to the global variables finalversion and lastversion the final version and the latest version
-function get_latest {
- local __taglist=("$@")
- local __tagsnumber=${#__taglist[@]}
- local __current
- case $__tagsnumber in
- 0)
- finalversion=$FIRST_VERSION
- lastversion=$FIRST_VERSION
- ;;
- 1)
- __current=${__taglist[0]}
- explode_version $__current ver
- if [ -n "$ver" ]; then
- if [ -n "${ver[3]}" ]; then
- finalversion=$FIRST_VERSION
- else
- finalversion=$__current
- fi
- lastversion=$__current
- else
- finalversion=$FIRST_VERSION
- lastversion=$FIRST_VERSION
- fi
- ;;
- *)
- local __lastpos=$(($__tagsnumber-1))
- for i in $(seq 0 $__lastpos)
- do
- __current=${__taglist[i]}
- explode_version ${__taglist[i]} ver
- if [ -n "$ver" ]; then
- if [ -z "${ver[3]}" ]; then
- get_latest_of_two $finalversion $__current finalversion
- get_latest_of_two $lastversion $finalversion lastversion
- else
- get_latest_of_two $lastversion $__current lastversion
- fi
- fi
- done
- ;;
- esac
-
- if git rev-parse -q --verify "refs/tags/$lastversion" >/dev/null; then
- hasversiontag="true"
- else
- hasversiontag="false"
- fi
-}
-
-# Gets the next version given the provided scope
-# $1 The version that is going to be bumped
-# $2 The scope to bump
-# $3 The variable where to stoer the result
-function get_next_version {
- local __exploded
- local __fromversion=$1
- local __scope=$2
- local __result=$3
- explode_version $__fromversion __exploded
- case $__scope in
- major)
- __exploded[0]=$((${__exploded[0]}+1))
- __exploded[1]=0
- __exploded[2]=0
- ;;
- minor)
- __exploded[1]=$((${__exploded[1]}+1))
- __exploded[2]=0
- ;;
- patch)
- __exploded[2]=$((${__exploded[2]}+1))
- ;;
- esac
-
- eval "$__result=v${__exploded[0]}.${__exploded[1]}.${__exploded[2]}"
-}
-
-function bump_version {
- ## First we try to get the next version based on the existing last one
- if [ "$scope" == "auto" ]; then
- get_scope_auto scope
- fi
-
- local __candidatefromlast=$FIRST_VERSION
- local __explodedlast
- explode_version $lastversion __explodedlast
- if [[ -n "${__explodedlast[3]}" ]]; then
- # Last version is not final
- local __idlast
- explode_identifier ${__explodedlast[3]} __idlast
-
- # We get the last, given the desired id based on the scope
- __candidatefromlast="v${__explodedlast[0]}.${__explodedlast[1]}.${__explodedlast[2]}"
- if [[ -n "$identifier" ]]; then
- local __nextid="$identifier.1"
- if [ "$identifier" == "${__idlast[0]}" ]; then
- # We target the same identifier as the last so we increase one
- __nextid="$identifier.$(( ${__idlast[1]}+1 ))"
- __candidatefromlast="$__candidatefromlast-$__nextid"
- else
- # Different identifiers, we make sure we are assigning a higher identifier, if not, we increase the version
- __candidatefromlast="$__candidatefromlast-$__nextid"
- local __comparedwithlast
- compare_versions $__candidatefromlast $lastversion __comparedwithlast
- if [ "$__comparedwithlast" == -1 ]; then
- get_next_version $__candidatefromlast $scope __candidatefromlast
- __candidatefromlast="$__candidatefromlast-$__nextid"
- fi
- fi
- fi
- fi
-
- # Then we try to get the version based on the latest final one
- local __candidatefromfinal=$FIRST_VERSION
- get_next_version $finalversion $scope __candidatefromfinal
- if [[ -n "$identifier" ]]; then
- __candidatefromfinal="$__candidatefromfinal-$identifier.1"
- fi
-
- # Finally we compare both candidates
- local __resultversion
- local __result
- compare_versions $__candidatefromlast $__candidatefromfinal __result
- case $__result in
- 0)
- __resultversion=$__candidatefromlast
- ;;
- -1)
- __resultversion="$__candidatefromfinal"
- ;;
- 1)
- __resultversion=$__candidatefromlast
- ;;
- esac
-
- eval "$1=$__resultversion"
-}
-
-function increase_version {
- local __version=
-
- if [ -z $forcedversion ]; then
- bump_version __version
- else
- if [[ $forcedversion =~ $SEMVER_REGEX ]] ; then
- compare_versions $forcedversion $lastversion __result
- if [ $__result -le 0 ]; then
- echo "Version can't be lower than last version: $lastversion"
- exit 1
- fi
- else
- echo "Non valid version to bump"
- exit 1
- fi
- __version=$forcedversion
- fi
-
- if [ "$displayonly" == "true" ]; then
- echo "$__version"
- else
- if [ "$forcetag" == "false" ]; then
- check_git_dirty_status
- fi
- local __commitlist
- if [ "$finalversion" == "$FIRST_VERSION" ] || [ "$hasversiontag" != "true" ]; then
- __commitlist="$(git log --pretty=oneline | cat)"
- else
- __commitlist="$(git log --pretty=oneline $finalversion... | cat)"
- fi
-
- # If we are forcing a bump, we add bump to the commit list
- if [[ -z $__commitlist && "$forcetag" == "true" ]]; then
- __commitlist="bump"
- fi
-
- if [[ -z $__commitlist ]]; then
- echo "No commits since the last final version, not bumping version"
- else
- if [[ -z $versionname ]]; then
- versionname=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
- fi
- local __message="$versionname
-$__commitlist"
-
- # We check we have info on the user
- local __username=$(git config user.name)
- if [ -z "$__username" ]; then
- __username=$(id -u -n)
- git config user.name $__username
- fi
- local __useremail=$(git config user.email)
- if [ -z "$__useremail" ]; then
- __useremail=$(hostname)
- git config user.email "$__username@$__useremail"
- fi
-
- git tag -a $__version -m "$__message"
-
- # If we have a remote, we push there
- local __remotes=$(git remote)
- if [[ -n $__remotes ]]; then
- for __remote in $__remotes; do
- git push $__remote $__version > /dev/null
- if [ $? -eq 0 ]; then
- echo "$__version pushed to $__remote"
- else
- echo "Error pushing the tag $__version to $__remote"
- exit 1
- fi
- done
- else
- echo "$__version"
- fi
- fi
- fi
-}
-
-function check_git_dirty_status {
- local __repostatus=
- get_work_tree_status __repostatus
-
- if [ "$__repostatus" == "uncommitted" ]; then
- echo "ERROR: You have uncommitted changes"
- git status --porcelain
- exit 1
- fi
-
- if [ "$__repostatus" == "unstaged" ]; then
- echo "ERROR: You have unstaged changes"
- git status --porcelain
- exit 1
- fi
-}
-
-# Get the total amount of lines of code in the repo
-function get_total_lines {
- local __empty_id="$(git hash-object -t tree /dev/null)"
- local __changes="$(git diff --numstat $__empty_id | cat)"
- local __added_deleted=$1
- get_changed_lines "$__changes" $__added_deleted
-}
-
-# Get the total amount of lines of code since the provided tag
-function get_sincetag_lines {
- local __sincetag=$1
- local __changes="$(git diff --numstat $__sincetag | cat)"
- local __added_deleted=$2
- get_changed_lines "$__changes" $__added_deleted
-}
-
-function get_changed_lines {
- local __changes_numstat=$1
- local __result=$2
- IFS=$'\n' read -rd '' -a __changes_array <<<"$__changes_numstat"
- local __diff_regex="^([0-9]+)[[:space:]]+([0-9]+)[[:space:]]+.+$"
-
- local __total_added=0
- local __total_deleted=0
- for i in "${__changes_array[@]}"
- do
- if [[ $i =~ $__diff_regex ]] ; then
- local __added=${BASH_REMATCH[1]}
- local __deleted=${BASH_REMATCH[2]}
- __total_added=$(( $__total_added+$__added ))
- __total_deleted=$(( $__total_deleted+$__deleted ))
- fi
- done
- eval "$2=( $__total_added $__total_deleted )"
-}
-
-function get_scope_auto {
- local __verbose=$2
- local __total=0
- local __since=0
- local __scope=
-
- get_total_lines __total
- get_sincetag_lines $finalversion __since
-
- local __percentage=0
- if [ "$__total" != "0" ]; then
- local __percentage=$(( 100*$__since/$__total ))
- if [ $__percentage -gt "10" ]; then
- __scope="minor"
- else
- __scope="patch"
- fi
- fi
-
- eval "$1=$__scope"
- if [[ -n "$__verbose" ]]; then
- echo "[Auto Scope] Percentage of lines changed: $__percentage"
- echo "[Auto Scope] : $__scope"
- fi
-}
-
-function get_work_tree_status {
- # Update the index
- git update-index -q --ignore-submodules --refresh > /dev/null
- eval "$1="
-
- if ! git diff-files --quiet --ignore-submodules -- > /dev/null
- then
- eval "$1=unstaged"
- fi
-
- if ! git diff-index --cached --quiet HEAD --ignore-submodules -- > /dev/null
- then
- eval "$1=uncommitted"
- fi
-}
-
-function get_current {
- if [ "$hasversiontag" == "true" ]; then
- local __commitcount="$(git rev-list $lastversion.. --count)"
- else
- local __commitcount="$(git rev-list --count HEAD)"
- fi
- local __status=
- get_work_tree_status __status
-
- if [ "$__commitcount" == "0" ] && [ -z "$__status" ]; then
- eval "$1=$lastversion"
- else
- local __buildinfo="$(git rev-parse --short HEAD)"
- local __currentbranch="$(git rev-parse --abbrev-ref HEAD)"
- if [ "$__currentbranch" != "master" ]; then
- __buildinfo="$__currentbranch.$__buildinfo"
- fi
-
- local __suffix=
- if [ "$__commitcount" != "0" ]; then
- if [ -n "$__suffix" ]; then
- __suffix="$__suffix."
- fi
- __suffix="$__suffix$__commitcount"
- fi
- if [ -n "$__status" ]; then
- if [ -n "$__suffix" ]; then
- __suffix="$__suffix."
- fi
- __suffix="$__suffix$__status"
- fi
-
- __suffix="$__suffix+$__buildinfo"
- if [ "$lastversion" == "$finalversion" ]; then
- scope="patch"
- identifier=
- local __bumped=
- bump_version __bumped
- eval "$1=$__bumped-dev.$__suffix"
- else
- eval "$1=$lastversion.$__suffix"
- fi
- fi
-}
-
-function init {
- git fetch > /dev/null
- TAGS="$(git tag)"
- IFS=$'\n' read -rd '' -a TAG_ARRAY <<<"$TAGS"
-
- get_latest ${TAG_ARRAY[@]}
- currentbranch="$(git rev-parse --abbrev-ref HEAD)"
-}
-
-case $ACTION in
- --help)
- echo -e "$HELP"
- ;;
- --version)
- echo -e "${PROG}: $PROG_VERSION"
- ;;
- final)
- init
- diff=$(git diff master | cat)
- if [ "$forcetag" == "false" ]; then
- if [ -n "$diff" ]; then
- echo "ERROR: Branch must be updated with master for final versions"
- exit 1
- fi
- fi
- increase_version
- ;;
- alpha|beta)
- init
- identifier="$ACTION"
- increase_version
- ;;
- candidate)
- init
- identifier="rc"
- increase_version
- ;;
- getlast)
- init
- echo "$lastversion"
- ;;
- getfinal)
- init
- echo "$finalversion"
- ;;
- getcurrent)
- init
- get_current current
- echo "$current"
- ;;
- get)
- init
- echo "Current final version: $finalversion"
- echo "Last tagged version: $lastversion"
- ;;
- *)
- echo "'$ACTION' is not a valid command, see --help for available commands."
- ;;
-esac
diff --git a/workers.tf b/workers.tf
index 8cd02d6350..2ef75569cf 100644
--- a/workers.tf
+++ b/workers.tf
@@ -73,8 +73,6 @@ module "eks_managed_node_group" {
pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, "")
post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, "")
bootstrap_extra_args = try(each.value.bootstrap_extra_args, "")
- kubelet_extra_args = try(each.value.kubelet_extra_args, "")
- node_labels = try(each.value.node_labels, {})
# Launch Template
create_launch_template = try(each.value.create_launch_template, false)
@@ -211,7 +209,6 @@ module "self_managed_node_group" {
enable_monitoring = try(each.value.enable_monitoring, null)
network_interfaces = try(each.value.network_interfaces, [])
placement = try(each.value.placement, null)
- tag_specifications = try(each.value.tag_specifications, [])
# IAM role
create_iam_instance_profile = try(each.value.create_iam_instance_profile, true)
From 3b86430092a73e14eabdc027ddcde3239bd324d8 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 19:40:46 -0500
Subject: [PATCH 25/83] chore: updating documentation
---
{docs => .github}/CHANGELOG.pre-v11.0.0.md | 0
{docs => .github}/UPGRADE-17.0.md | 0
README.md | 582 +++++++++++++++------
docs/autoscaling.md | 98 ----
docs/enable-docker-bridge-network.md | 23 -
docs/faq.md | 173 ------
docs/spot-instances.md | 73 ---
modules/eks-managed-node-group/README.md | 72 +--
modules/fargate-profile/README.md | 16 +-
modules/self-managed-node-group/README.md | 12 +
workers.tf => node_groups.tf | 0
11 files changed, 468 insertions(+), 581 deletions(-)
rename {docs => .github}/CHANGELOG.pre-v11.0.0.md (100%)
rename {docs => .github}/UPGRADE-17.0.md (100%)
delete mode 100644 docs/autoscaling.md
delete mode 100644 docs/enable-docker-bridge-network.md
delete mode 100644 docs/faq.md
delete mode 100644 docs/spot-instances.md
rename workers.tf => node_groups.tf (100%)
diff --git a/docs/CHANGELOG.pre-v11.0.0.md b/.github/CHANGELOG.pre-v11.0.0.md
similarity index 100%
rename from docs/CHANGELOG.pre-v11.0.0.md
rename to .github/CHANGELOG.pre-v11.0.0.md
diff --git a/docs/UPGRADE-17.0.md b/.github/UPGRADE-17.0.md
similarity index 100%
rename from docs/UPGRADE-17.0.md
rename to .github/UPGRADE-17.0.md
diff --git a/README.md b/README.md
index 5c76f96e97..51b1635609 100644
--- a/README.md
+++ b/README.md
@@ -1,219 +1,501 @@
# AWS EKS Terraform module
-[![Lint Status](https://github.com/terraform-aws-modules/terraform-aws-eks/workflows/Lint/badge.svg)](https://github.com/terraform-aws-modules/terraform-aws-eks/actions)
-[![LICENSE](https://img.shields.io/github/license/terraform-aws-modules/terraform-aws-eks)](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/LICENSE)
+Terraform module which creates AWS EKS (Kubernetes) resources
-Terraform module which creates Kubernetes cluster resources on AWS EKS.
+## Available Features
-## Features
-
-- Create an EKS cluster
-- All node types are supported:
- - [Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
- - [Self-managed Nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
- - [Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
-- Support AWS EKS Optimized or Custom AMI
+- EKS cluster
+- All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported:
+ - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
+ - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
+ - [Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- Support for custom AMI, custom launch template, and custom user data
- Create or manage security groups that allow communication and coordination
-## Important note
+## Usage
-Kubernetes is evolving a lot, and each minor version includes new features, fixes, or changes.
+```hcl
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
-**Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version, and [CHANGELOG.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) for all changes in this EKS module.**
+ cluster_version = "1.22"
+ cluster_name = "my-cluster"
-You also need to ensure that your applications and add-ons are updated, or workloads could fail after the upgrade is complete. For any actions you may need to take before upgrading, see the steps in the [EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html).
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
-## Usage example
+ eks_managed_node_groups = {
+ default = {}
+ }
-```hcl
-data "aws_eks_cluster" "eks" {
- name = module.eks.cluster_id
-}
+ self_managed_node_groups = {
+ one = {
+ instance_type = "m5.large"
+ desired_capacity = 1
+ max_size = 5
+ }
+ }
-data "aws_eks_cluster_auth" "eks" {
- name = module.eks.cluster_id
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
}
+```
-provider "kubernetes" {
- host = data.aws_eks_cluster.eks.endpoint
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.eks.token
-}
+## Notes
+
+- Kubernetes is constantly evolving, and each version brings new features, fixes, and changes. Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version, and [CHANGELOG.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) for all changes related to this EKS module. Applications and add-ons will most likely require updates or workloads could fail after an upgrade. Check [the documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html) for any necessary steps you may need to perform.
+
+- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. See the [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh) example provided.
+
+Frequently Asked Questions
+
+
+Why are nodes not being registered?
+
+This is often caused by a networking or endpoint misconfiguration. At least one of the cluster's public or private endpoints must be enabled for access to the cluster to work. If you require a public endpoint, it is recommended to enable both endpoints and restrict the public endpoint by setting `cluster_endpoint_public_access_cidrs`. More about communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
+
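+A minimal sketch combining both endpoints with a restricted public CIDR (the module source follows the usage example above; the CIDR is a placeholder):
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks/aws"
+
+  # Nodes reach the API via the private endpoint; the public endpoint is
+  # reachable only from the allowed CIDRs
+  cluster_endpoint_private_access      = true
+  cluster_endpoint_public_access       = true
+  cluster_endpoint_public_access_cidrs = ["203.0.113.0/24"] # placeholder CIDR
+
+  # ... remaining cluster configuration
+}
+```
+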
+Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
+
+- Nodes in private subnets: use a NAT gateway or instance, added along with appropriate routing rules.
+- Nodes in public subnets: assign public IPs to nodes. Set `public_ip = true` in the `worker_groups` list on this module.
+
+Important: If you enable only the public endpoint and set `cluster_endpoint_public_access_cidrs` to restrict access, remember that EKS nodes also use the public endpoint, so you must allow access to the endpoint from the nodes as well. If not, your nodes will fail to work correctly.
+
+The cluster private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node communication to the endpoint stays within the VPC. When the private endpoint is enabled, ensure that VPC DNS resolution and hostnames are also enabled:
+
+- If managing the VPC with Terraform: set `enable_dns_hostnames = true` and `enable_dns_support = true` on the `aws_vpc` resource. The [`terraform-aws-modules/vpc/aws`](https://github.com/terraform-aws-modules/terraform-aws-vpc/) community module also has these variables; see the sketch after this list.
+- Otherwise refer to the [AWS VPC docs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-updating) and [AWS EKS Cluster Endpoint Access docs](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for more information.
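+
+A minimal sketch of the DNS settings using the community VPC module (the module name and the elided arguments are assumptions):
+
+```hcl
+module "vpc" {
+  source = "terraform-aws-modules/vpc/aws"
+
+  # Both are required so nodes can resolve the EKS private endpoint
+  enable_dns_support   = true
+  enable_dns_hostnames = true
+
+  # ... remaining VPC configuration (CIDR, subnets, etc.)
+}
+```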
+
+Nodes also need to be able to connect to other AWS services and to pull container images from container registries (ECR). If for some reason you cannot enable public internet access for nodes, you can add VPC endpoints for the relevant services: EC2 API, ECR API, ECR DKR and S3.
+
+
+How can I work with the cluster if I disable the public endpoint?
+
+You have to interact with the cluster from within the VPC that it is associated with; either through a VPN connection, a bastion EC2 instance, etc.
+
+
+How can I stop Terraform from removing the EKS tags from my VPC and subnets?
+
+You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
+
+An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore_tags-configuration-block). However, this can also cause Terraform to display a perpetual difference.
+
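+For example, a hypothetical provider configuration ignoring cluster tags by prefix (the prefix is an assumption; adjust it to your tagging scheme):
+
+```hcl
+provider "aws" {
+  region = "eu-west-1"
+
+  # Tags matching these prefixes are ignored when diffing, so externally
+  # managed EKS tags do not show up as perpetual changes
+  ignore_tags {
+    key_prefixes = ["kubernetes.io/cluster/"]
+  }
+}
+```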
+
+Why are there no changes when a node group's desired count is modified?
+
+The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block. The setting is ignored to allow the cluster autoscaler to work correctly so that `terraform apply` does not accidentally remove running workers. You can change the desired count via the CLI or console if you're not using the cluster autoscaler.
+
+If you are not using autoscaling and want to control the number of nodes via Terraform, set the `min_capacity` and `max_capacity` for node groups. Before changing those values, you must satisfy the AWS `desired_capacity` constraints: the current desired capacity must fall between the new min/max values.
+
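+A sketch of pinning node counts from Terraform (the group name is illustrative; the attribute names follow the FAQ text above):
+
+```hcl
+  eks_managed_node_groups = {
+    default = {
+      # Terraform enforces these bounds; the (ignored) desired count must lie between them
+      min_capacity = 2
+      max_capacity = 4
+    }
+  }
+```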
+
+Why are nodes not recreated when the `launch_template` is recreated?
+
+By default the ASG is not configured to be recreated when the launch configuration or template changes; you will need to use a process to drain and cycle the nodes.
+
+If you are NOT using the cluster autoscaler:
+
+- Add a new instance
+- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
+- Wait for pods to be Running
+- Terminate the old node instance. ASG will start a new instance
+- Repeat the drain and delete process until all old nodes are replaced
+
+If you are using the cluster autoscaler:
+
+- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
+- Wait for pods to be Running
+- Cluster autoscaler will create new nodes when required
+- Repeat until all old nodes are drained
+- Cluster autoscaler will terminate the old nodes after 10-60 minutes automatically
+
+You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
+
+
+How can I use Windows workers?
+
+To enable Windows support for your EKS cluster, you must apply some configuration manually. See [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
+
+Windows nodes require an additional cluster role (`eks:kube-proxy-windows`).
+
+
+Example configuration
+
+Amazon EKS clusters must contain one or more Linux worker nodes to run core system pods that only run on Linux, such as coredns and the VPC resource controller.
+
+1. Build the AWS EKS cluster with the following worker configuration (default Linux):
+
+```hcl
+ worker_groups = {
+ one = {
+ name = "worker-group-linux"
+ instance_type = "m5.large"
+ platform = "linux"
+ asg_desired_capacity = 2
+ },
+ }
+```
+
+2. Apply the commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use the tab named `Windows`)
+3. Add one more worker group for Windows with the required field `platform = "windows"` and update your cluster. Worker group example:
+
+```hcl
+ worker_groups = {
+ linux = {
+ name = "worker-group-linux"
+ instance_type = "m5.large"
+ platform = "linux"
+ asg_desired_capacity = 2
+ },
+ windows = {
+ name = "worker-group-windows"
+ instance_type = "m5.large"
+ platform = "windows"
+ asg_desired_capacity = 1
+ },
+ }
+```
+
+4. With `kubectl get nodes` you can see the cluster with mixed (Linux/Windows) node support.
+
+
+Worker nodes with labels do not join a 1.16+ cluster
+
+Starting with Kubernetes 1.16, kubelet restricts the labels in the `kubernetes.io` namespace that can be applied to nodes. Older configurations used labels such as `kubernetes.io/lifecycle=spot`, which is no longer allowed; instead, use `node.kubernetes.io/lifecycle=spot`
+
+Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
+
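+For self-managed groups, a label in the allowed namespace can be passed via the kubelet arguments; a sketch mirroring the launch template examples later in this README:
+
+```hcl
+  bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+```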
+
+I am using both EKS Managed node groups and Self Managed node groups and pods scheduled on an EKS Managed node group are unable to resolve DNS (even communication between pods)
+
+This happens because CoreDNS can be scheduled on Self Managed nodes, and by default this module does not create security group rules to allow communication between pods scheduled on Self Managed node groups and EKS Managed node groups.
+
+You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create the required rules.
+
+Dedicated control plane subnets
+
+[AWS recommends](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) creating dedicated subnets for the network interfaces EKS creates (control plane), which this module supports. To enable this:
+1. Set the `subnet_ids` to the subnets for the control plane
+2. Within the `eks_managed_node_groups`, `self_managed_node_groups`, or `fargate_profiles`, set the `subnet_ids` for the nodes (different from the control plane).
+
+```hcl
module "eks" {
- source = "terraform-aws-modules/eks/aws"
+ source = "terraform-aws-modules/eks/aws"
cluster_version = "1.21"
cluster_name = "my-cluster"
- vpc_id = "vpc-1234556abcdef"
- subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
- worker_groups = {
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
+
+ self_managed_node_group_defaults = {
+ subnet_ids = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
+ }
+
+ self_managed_node_groups = {
one = {
instance_type = "m4.large"
asg_max_size = 5
+ },
+ two = {
+ name = "worker-group-2"
+ subnet_ids = ["subnet-qwer123"]
+ instance_type = "t3.medium"
+ asg_desired_capacity = 1
+ public_ip = true
+ ebs_optimized = true
}
}
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
}
```
+
-There is also a [complete example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) which shows large set of features available in the module.
+Autoscaling
-## Submodules
+To enable worker node autoscaling you will need to do a few things:
-Root module calls these modules which can also be used separately to create independent resources:
+- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group (see the sketch after this list)
+- Install the cluster-autoscaler
+- Give the cluster-autoscaler access via an IAM policy
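+
+A sketch of applying the auto-discovery tags to an existing worker ASG (the `aws_autoscaling_group_tag` resource requires a recent AWS provider; the ASG and cluster names are placeholders):
+
+```hcl
+resource "aws_autoscaling_group_tag" "cluster_autoscaler" {
+  for_each = toset([
+    "k8s.io/cluster-autoscaler/enabled",
+    "k8s.io/cluster-autoscaler/my-cluster",
+  ])
+
+  autoscaling_group_name = "my-asg-name" # placeholder
+
+  tag {
+    key                 = each.value
+    value               = "true"
+    propagate_at_launch = false
+  }
+}
+```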
-- [fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/fargate) - creates Fargate profiles, see [examples/fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) for detailed examples.
-
+It's probably easiest to follow the example in [examples/irsa](examples/irsa), which installs the cluster-autoscaler using [Helm](https://helm.sh/) and uses IRSA to attach a policy.
-## Notes
+If you don't want to use IRSA then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example Terraform code for the policy:
-- By default, this module manages the `aws-auth` configmap for you (`manage_aws_auth=true`). To avoid the following [issue](https://github.com/aws/containers-roadmap/issues/654) where the EKS creation is `ACTIVE` but not ready. We implemented a "retry" logic with a [fork of the http provider](https://github.com/terraform-aws-modules/terraform-provider-http). This fork adds the support of a self-signed CA certificate. The original PR can be found [here](https://github.com/hashicorp/terraform-provider-http/pull/29).
+```hcl
+resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
+ policy_arn = aws_iam_policy.worker_autoscaling.arn
+ role = module.my_cluster.worker_iam_role_name
+}
-- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. Find the complete example here [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh).
+resource "aws_iam_policy" "worker_autoscaling" {
+ name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
+ description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
+ policy = data.aws_iam_policy_document.worker_autoscaling.json
+ path = var.iam_path
+ tags = var.tags
+}
-## Documentation
+data "aws_iam_policy_document" "worker_autoscaling" {
+ statement {
+ sid = "eksWorkerAutoscalingAll"
+ effect = "Allow"
-### Official docs
+ actions = [
+ "autoscaling:DescribeAutoScalingGroups",
+ "autoscaling:DescribeAutoScalingInstances",
+ "autoscaling:DescribeLaunchConfigurations",
+ "autoscaling:DescribeTags",
+ "ec2:DescribeLaunchTemplateVersions",
+ ]
-- [Amazon Elastic Kubernetes Service (Amazon EKS)](https://docs.aws.amazon.com/eks/latest/userguide/).
+ resources = ["*"]
+ }
-### Module docs
+ statement {
+ sid = "eksWorkerAutoscalingOwn"
+ effect = "Allow"
-- [Autoscaling](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/autoscaling.md): How to enable worker node autoscaling.
-- [Enable Docker Bridge Network](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/enable-docker-bridge-network.md): How to enable the docker bridge network when using the EKS-optimized AMI, which disables it by default.
-- [Spot instances](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/spot-instances.md): How to use spot instances with this module.
-- [IAM Permissions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md): Minimum IAM permissions needed to setup EKS Cluster.
-- [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md): Frequently Asked Questions
+ actions = [
+ "autoscaling:SetDesiredCapacity",
+ "autoscaling:TerminateInstanceInAutoScalingGroup",
+ "autoscaling:UpdateAutoScalingGroup",
+ ]
-## Examples
+ resources = ["*"]
-There are detailed examples available for you to see how certain features of this module can be used in a straightforward way. Make sure to check them and run them before opening an issue. [Here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md) you can find the list of the minimum IAM Permissions required to create EKS cluster.
+ condition {
+ test = "StringEquals"
+ variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
+ values = ["owned"]
+ }
-- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) - Create EKS Cluster with all available workers types in various combinations with many of supported features.
-- [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket) - Create EKS cluster using [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html).
-- [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) - Create EKS cluster with [Fargate profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) and attach Fargate profiles to an existing EKS cluster.
+ condition {
+ test = "StringEquals"
+ variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
+ values = ["true"]
+ }
+ }
+}
+```
-## Contributing
+Example values for the [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):
-Report issues/questions/feature requests on in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section.
+```yaml
+rbac:
+ create: true
-Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
+cloudProvider: aws
+awsRegion: YOUR_AWS_REGION
-## Authors
+autoDiscovery:
+ clusterName: YOUR_CLUSTER_NAME
+ enabled: true
-This module has been originally created by [Brandon O'Connor](https://github.com/brandoconnor), and was maintained by [Max Williams](https://github.com/max-rocket-internet), [Thierno IB. BARRY](https://github.com/barryib) and many more [contributors listed here](https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors)!
+image:
+ repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
+ tag: v1.16.5
+```
-## License
+To install the chart, simply run helm with the `--values` option:
-Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
+```bash
+helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
+```
+
+
+
+Spot Instances
+
+# TODO - move to an example
+
+You will need to install a daemonset to catch the two-minute warning before termination, so that the node is gracefully drained beforehand. You can install the [k8s-spot-termination-handler](https://github.com/kube-aws/kube-spot-termination-notice-handler) for this; there's a [Helm chart](https://github.com/helm/charts/tree/master/stable/k8s-spot-termination-handler):
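+
+A minimal install might look like the following (chart reference per the linked repository; release naming flags omitted):
+
+```bash
+helm install stable/k8s-spot-termination-handler --namespace kube-system
+```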
+
+The following examples include at least one worker group that uses on-demand instances. This worker group has an added node label that can be used in scheduling. It can be used to pin any workload not suitable for spot instances, and it is important for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), as it might end up unscheduled when spot instances are terminated. You can add this to the values of the [cluster-autoscaler helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart):
+
+```yaml
+nodeSelector:
+ kubernetes.io/lifecycle: normal
+```
+
+Notes:
+
+- The `spot_price` is set to the on-demand price so that the spot instances will run as long as they are cheaper.
+- It's best to have a broad range of instance types to ensure there are always some instances to run when prices fluctuate.
+- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
+- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.
+
+## Using Launch Templates
+
+```hcl
+ self_managed_node_groups = {
+ one = {
+ name = "spot-1"
+ instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
+ spot_instance_pools = 4
+ max_size = 5
+ desired_capacity = 5
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+ public_ip = true
+ },
+ }
+```
+
+## Using Launch Templates With Both Spot and On Demand
+
+This example launches 2 on-demand instances of type m5.large and can scale up using both spot and on-demand instances. The `node.kubernetes.io/lifecycle` node label will be set to the value queried from the EC2 instance metadata service: either "on-demand" or "spot".
+
+`on_demand_percentage_above_base_capacity` is set to 25 so 1 in 4 new nodes, when auto-scaling, will be on-demand instances. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).
+
+```hcl
+ self_managed_node_groups = {
+ one = {
+ name = "mixed-demand-spot"
+ override_instance_types = ["m5.large", "m5a.large", "m4.large"]
+ root_encrypted = true
+ root_volume_size = 50
+
+ min_size = 2
+ desired_capacity = 2
+ on_demand_base_capacity = 3
+ on_demand_percentage_above_base_capacity = 25
+ asg_max_size = 20
+ spot_instance_pools = 3
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`'"
+ }
+ }
+```
+
+Note: there is a known issue with the cluster-autoscaler (https://github.com/kubernetes/autoscaler/issues/1133); AWS has since released its own termination handler: https://github.com/aws/aws-node-termination-handler
+
+
+
+## Examples
+
+- [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket): EKS cluster using [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html)
+- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations
+- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups
+- [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- [Instance Refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh): EKS Cluster using self-managed node group demonstrating how to enable/utilize instance refresh configuration along with node termination handler
+- [IRSA](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/irsa): EKS Cluster demonstrating how to enable IRSA
+- [Secrets Encryption](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/secrets_encryption): EKS Cluster demonstrating how to encrypt cluster secrets
+- [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups
+
+## Contributing
+
+Report issues/questions/feature requests in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section.
+Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
+
## Requirements
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [tls](#requirement\_tls) | >= 2.2.0 |
+| Name | Version |
+| ------------------------------------------------------------------------ | --------- |
+| [terraform](#requirement_terraform) | >= 0.13.1 |
+| [aws](#requirement_aws) | >= 3.56.0 |
+| [tls](#requirement_tls) | >= 2.2.0 |
## Providers
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [tls](#provider\_tls) | >= 2.2.0 |
+| Name | Version |
+| ------------------------------------------------ | --------- |
+| [aws](#provider_aws) | >= 3.56.0 |
+| [tls](#provider_tls) | >= 2.2.0 |
## Modules
-| Name | Source | Version |
-|------|--------|---------|
-| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a |
-| [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a |
-| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
+| Name | Source | Version |
+| -------------------------------------------------------------------------------------------------------- | --------------------------------- | ------- |
+| [eks_managed_node_group](#module_eks_managed_node_group) | ./modules/eks-managed-node-group | n/a |
+| [fargate_profile](#module_fargate_profile) | ./modules/fargate-profile | n/a |
+| [self_managed_node_group](#module_self_managed_node_group) | ./modules/self-managed-node-group | n/a |
## Resources
-| Name | Type |
-|------|------|
-| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
-| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
+| Name | Type |
+| -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
+| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
+| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
+| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
+| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
## Inputs
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster\_endpoint\_private\_access\_sg' should be provided | `bool` | `false` | no |
-| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
-| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | | `[]` | no |
-| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
-| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
-| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
-| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
-| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
-| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
-| [cluster\_iam\_role\_path](#input\_cluster\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
-| [cluster\_iam\_role\_permissions\_boundary](#input\_cluster\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
-| [cluster\_iam\_role\_tags](#input\_cluster\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
-| [cluster\_iam\_role\_use\_name\_prefix](#input\_cluster\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
-| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
-| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
-| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster role created | `string` | `null` | no |
-| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
-| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
-| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
-| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
-| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
-| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
-| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
-| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
-| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
-| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
-| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
-| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
+| Name | Description | Type | Default | Required |
+| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | --------------------------------- | :------: |
+| [cluster_create_endpoint_private_access_sg_rule](#input_cluster_create_endpoint_private_access_sg_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster_endpoint_private_access_sg' should be provided | `bool` | `false` | no |
+| [cluster_egress_cidrs](#input_cluster_egress_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster_enabled_log_types](#input_cluster_enabled_log_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
+| [cluster_encryption_config](#input_cluster_encryption_config) | Configuration block with encryption configuration for the cluster. See examples/secrets_encryption/main.tf for example format | | `[]` | no |
+| [cluster_endpoint_private_access](#input_cluster_endpoint_private_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
+| [cluster_endpoint_private_access_cidrs](#input_cluster_endpoint_private_access_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
+| [cluster_endpoint_private_access_sg](#input_cluster_endpoint_private_access_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
+| [cluster_endpoint_public_access](#input_cluster_endpoint_public_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
+| [cluster_endpoint_public_access_cidrs](#input_cluster_endpoint_public_access_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster_iam_role_arn](#input_cluster_iam_role_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
+| [cluster_iam_role_name](#input_cluster_iam_role_name) | Name to use on cluster role created | `string` | `null` | no |
+| [cluster_iam_role_path](#input_cluster_iam_role_path) | Cluster IAM role path | `string` | `null` | no |
+| [cluster_iam_role_permissions_boundary](#input_cluster_iam_role_permissions_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
+| [cluster_iam_role_tags](#input_cluster_iam_role_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
+| [cluster_iam_role_use_name_prefix](#input_cluster_iam_role_use_name_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [cluster_log_kms_key_id](#input_cluster_log_kms_key_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
+| [cluster_log_retention_in_days](#input_cluster_log_retention_in_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
+| [cluster_name](#input_cluster_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
+| [cluster_security_group_id](#input_cluster_security_group_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
+| [cluster_security_group_name](#input_cluster_security_group_name) | Name to use on cluster role created | `string` | `null` | no |
+| [cluster_security_group_tags](#input_cluster_security_group_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
+| [cluster_security_group_use_name_prefix](#input_cluster_security_group_use_name_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [cluster_service_ipv4_cidr](#input_cluster_service_ipv4_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
+| [cluster_tags](#input_cluster_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
+| [cluster_timeouts](#input_cluster_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
+| [cluster_version](#input_cluster_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
+| [create](#input_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create_cluster_iam_role](#input_create_cluster_iam_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create_cluster_security_group](#input_create_cluster_security_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
+| [eks_managed_node_groups](#input_eks_managed_node_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
+| [enable_irsa](#input_enable_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
+| [fargate_profiles](#input_fargate_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
+| [openid_connect_audiences](#input_openid_connect_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
+| [self_managed_node_groups](#input_self_managed_node_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
+| [subnet_ids](#input_subnet_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
+| [tags](#input_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
+| [vpc_id](#input_vpc_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
## Outputs
-| Name | Description |
-|------|-------------|
-| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
-| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
-| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
-| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
-| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
-| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
-| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
-| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
-| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
-| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
-| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
-| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
-| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| Name | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [cloudwatch_log_group_arn](#output_cloudwatch_log_group_arn) | Arn of cloudwatch log group created |
+| [cloudwatch_log_group_name](#output_cloudwatch_log_group_name) | Name of cloudwatch log group created |
+| [cluster_arn](#output_cluster_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster_certificate_authority_data](#output_cluster_certificate_authority_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster_endpoint](#output_cluster_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster_iam_role_arn](#output_cluster_iam_role_arn) | IAM role ARN of the EKS cluster |
+| [cluster_iam_role_name](#output_cluster_iam_role_name) | IAM role name of the EKS cluster |
+| [cluster_iam_role_unique_id](#output_cluster_iam_role_unique_id) | Stable and unique string identifying the IAM role |
+| [cluster_id](#output_cluster_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster_oidc_issuer_url](#output_cluster_oidc_issuer_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster_platform_version](#output_cluster_platform_version) | Platform version for the cluster |
+| [cluster_security_group_arn](#output_cluster_security_group_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster_security_group_id](#output_cluster_security_group_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster_status](#output_cluster_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks_managed_node_groups](#output_eks_managed_node_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate_profiles](#output_fargate_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc_provider_arn](#output_oidc_provider_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self_managed_node_groups](#output_self_managed_node_groups) | Map of attribute maps for all self managed node groups created |
+
+
+## License
+
+Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
diff --git a/docs/autoscaling.md b/docs/autoscaling.md
deleted file mode 100644
index 3c1aa5ee93..0000000000
--- a/docs/autoscaling.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# Autoscaling
-
-To enable worker node autoscaling you will need to do a few things:
-
-- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group
-- Install the cluster-autoscaler
-- Give the cluster-autoscaler access via an IAM policy
-
-It's probably easiest to follow the example in [examples/irsa](../examples/irsa), which installs the cluster-autoscaler using [Helm](https://helm.sh/) and uses IRSA to attach a policy.
-
-If you don't want to use IRSA, then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example Terraform code for the policy:
-
-```hcl
-resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
- policy_arn = aws_iam_policy.worker_autoscaling.arn
- role = module.my_cluster.worker_iam_role_name
-}
-
-resource "aws_iam_policy" "worker_autoscaling" {
- name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
- description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
- policy = data.aws_iam_policy_document.worker_autoscaling.json
- path = var.iam_path
- tags = var.tags
-}
-
-data "aws_iam_policy_document" "worker_autoscaling" {
- statement {
- sid = "eksWorkerAutoscalingAll"
- effect = "Allow"
-
- actions = [
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeLaunchConfigurations",
- "autoscaling:DescribeTags",
- "ec2:DescribeLaunchTemplateVersions",
- ]
-
- resources = ["*"]
- }
-
- statement {
- sid = "eksWorkerAutoscalingOwn"
- effect = "Allow"
-
- actions = [
- "autoscaling:SetDesiredCapacity",
- "autoscaling:TerminateInstanceInAutoScalingGroup",
- "autoscaling:UpdateAutoScalingGroup",
- ]
-
- resources = ["*"]
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
- values = ["owned"]
- }
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
- values = ["true"]
- }
- }
-}
-```
-
-And here are example values for the [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):
-
-```yaml
-rbac:
- create: true
-
-cloudProvider: aws
-awsRegion: YOUR_AWS_REGION
-
-autoDiscovery:
- clusterName: YOUR_CLUSTER_NAME
- enabled: true
-
-image:
- repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
- tag: v1.16.5
-```
-
-To install the chart, simply run helm with the `--values` option:
-
-```shell
-helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
-```
-
-## Notes
-
-The `local.tf` file provides an `asg_desired_capacity` variable that can be used to change the desired worker capacity in the autoscaling group, but it is currently ignored by Terraform to reduce the [complexities](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/510#issuecomment-531700442); scaling the cluster nodes up and down is handled by the cluster autoscaler.
-
-The cluster autoscaler major and minor versions must match your cluster. For example, if you are running a 1.16 EKS cluster, set `image.tag=v1.16.5`. Search through their [releases page](https://github.com/kubernetes/autoscaler/releases) for valid version numbers.
diff --git a/docs/enable-docker-bridge-network.md b/docs/enable-docker-bridge-network.md
deleted file mode 100644
index df5e20f629..0000000000
--- a/docs/enable-docker-bridge-network.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Enable Docker Bridge Network
-
-The latest versions of the AWS EKS-optimized AMI disable the docker bridge network by default. To enable it, add the `bootstrap_extra_args` parameter to your worker group template.
-
-```hcl
-locals {
- worker_groups = {
- one = {
- # Other parameters omitted for brevity
- bootstrap_extra_args = "--enable-docker-bridge true"
- }
- }
-}
-```
-
-Examples of when this would be necessary are:
-
-- You are running Continuous Integration in K8s and building Docker images by either mounting the Docker socket as a volume or using Docker-in-Docker. Without the bridge enabled, internal routing from the inner container can't reach the outside world.
-
-## See More
-
-- [Docker in Docker no longer works without docker0 bridge](https://github.com/awslabs/amazon-eks-ami/issues/183)
-- [Add enable-docker-bridge bootstrap argument](https://github.com/awslabs/amazon-eks-ami/pull/187)
diff --git a/docs/faq.md b/docs/faq.md
deleted file mode 100644
index ed9fa69cd1..0000000000
--- a/docs/faq.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# Frequently Asked Questions
-
-## How do I customize X on the worker group's settings?
-
-All the options that can be customized for worker groups are listed in [local.tf](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/local.tf) under `workers_group_defaults_defaults`.
-
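-For example, to override one of those defaults for every worker group, a minimal sketch (`root_volume_size` is one of the keys in that list; the value is illustrative):
-
-```hcl
-workers_group_defaults = {
-  root_volume_size = 100
-}
-```
-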
-Please open Issues or PRs if you think something is missing.
-
-## Why are nodes not being registered?
-
-### Networking
-
-Often caused by a networking or endpoint configuration issue.
-
-At least one of the cluster's public or private endpoints must be enabled for access to the cluster to work. If you require a public endpoint, it is recommended to enable both (public and private) and restrict the public endpoint by setting `cluster_endpoint_public_access_cidrs`. More about communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
-
-Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
-
-- Nodes in private subnets: via a NAT gateway or instance. One will need to be added, along with appropriate routing rules.
-- Nodes in public subnets: assign public IPs to nodes. Set `public_ip = true` in the `worker_groups` list on this module.
-
-> Important:
-> If you apply only the public endpoint and set up `cluster_endpoint_public_access_cidrs` to restrict access, remember that EKS nodes also use the public endpoint, so you must allow their access to it. Otherwise, your nodes will not work correctly.
-
-The cluster's private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node calls to the endpoint stay within the VPC.
-
-When the private endpoint is enabled, ensure that VPC DNS resolution and hostnames are also enabled:
-
-- If managing the VPC with Terraform: set `enable_dns_hostnames = true` and `enable_dns_support = true` on the `aws_vpc` resource. The [`terraform-aws-module/vpc/aws`](https://github.com/terraform-aws-modules/terraform-aws-vpc/) community module also has these variables.
-- Otherwise refer to the [AWS VPC docs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-updating) and [AWS EKS Cluster Endpoint Access docs](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for more information.
-
-Nodes also need to be able to connect to other AWS services and to pull container images from repositories. If for some reason you cannot enable public internet access for the nodes, you can add VPC endpoints for the relevant services: EC2 API, ECR API, ECR DKR and S3.
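-
-For example, to enable the private endpoint alongside a restricted public endpoint, a minimal sketch (the CIDR block and values are illustrative; other required arguments are omitted):
-
-```hcl
-module "eks" {
-  source = "terraform-aws-modules/eks/aws"
-
-  cluster_name    = "my-cluster"
-  cluster_version = "1.21"
-
-  # Keep the public endpoint but restrict who can reach it
-  cluster_endpoint_public_access       = true
-  cluster_endpoint_public_access_cidrs = ["203.0.113.0/24"]
-
-  # Let node calls to the API server stay within the VPC
-  cluster_endpoint_private_access = true
-}
-```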
-
-## How can I work with the cluster if I disable the public endpoint?
-
-You have to interact with the cluster from within the VPC that it's associated with, from an instance that's allowed access via the cluster's security group.
-
-Creating a new cluster with the public endpoint disabled is harder to achieve. You will either want to pass in a pre-configured cluster security group or apply the `aws-auth` configmap in a separate action.
-
-## How can I stop Terraform from removing the EKS tags from my VPC and subnets?
-
-You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
-
-An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore_tags-configuration-block). However, this can also cause Terraform to display a perpetual difference.
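-
-A sketch of that provider-level setting (the key prefix is illustrative):
-
-```hcl
-provider "aws" {
-  # Ignore externally managed EKS tags so plans stop showing tag removal
-  ignore_tags {
-    key_prefixes = ["kubernetes.io/cluster/"]
-  }
-}
-```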
-
-## Why does changing the node or worker group's desired count not do anything?
-
-The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block.
-
-The setting is ignored to allow the cluster autoscaler to work correctly so that `terraform apply` does not accidentally remove running workers.
-
-You can change the desired count via the CLI or console if you're not using the cluster autoscaler.
-
-If you are not using autoscaling and want to control the number of nodes via Terraform, set `min_capacity` and `max_capacity` for node groups, or `asg_min_size` and `asg_max_size` for worker groups. Before changing those values, you must satisfy the AWS `desired` capacity constraint (the desired capacity must lie between the new min/max values).
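-
-For example, to pin a worker group at exactly three nodes, a minimal sketch (values are illustrative):
-
-```hcl
-worker_groups = {
-  one = {
-    instance_type = "m5.large"
-    asg_min_size  = 3
-    asg_max_size  = 3
-  }
-}
-```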
-
-When you scale down, AWS will remove a random instance, so you will have to weigh the risks here.
-
-## Why are nodes not recreated when the `launch_configuration`/`launch_template` is recreated?
-
-By default the ASG is not configured to be recreated when the launch configuration or template changes. Terraform spins up new instances and then deletes all the old instances in one go, as the AWS provider team has refused to implement rolling updates of autoscaling groups. This is not good for Kubernetes stability.
-
-You need to use a process to drain and cycle the workers.
-
-If you are not using the cluster autoscaler:
-
-- Add a new instance
-- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
-- Wait for pods to be Running
-- Terminate the old node instance. The ASG will start a new instance
-- Repeat the drain and delete process until all old nodes are replaced
-
-If you are using the cluster autoscaler:
-
-- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
-- Wait for pods to be Running
-- Cluster autoscaler will create new nodes when required
-- Repeat until all old nodes are drained
-- Cluster autoscaler will terminate the old nodes after 10-60 minutes automatically
-
-You can also use a third-party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
-
-## How can I use Windows workers?
-
-To enable Windows support for your EKS cluster, you must apply some configuration manually. See [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
-
-Windows worker nodes require an additional cluster role (`eks:kube-proxy-windows`). If you are adding Windows workers to an existing cluster, you should apply the config-map-aws-auth again.
-
-### Example configuration
-
-Amazon EKS clusters must contain one or more Linux worker nodes to run core system pods that only run on Linux, such as CoreDNS and the VPC resource controller.
-
-1. Build the AWS EKS cluster with the following worker configuration (default Linux):
-
-```hcl
- worker_groups = {
- one = {
- name = "worker-group-linux"
- instance_type = "m5.large"
- platform = "linux"
- asg_desired_capacity = 2
- },
- }
-```
-
-2. Apply the commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use the tab named `Windows`)
-3. Add one more worker group for Windows with the required field `platform = "windows"` and update your cluster. Worker group example:
-
-```hcl
- worker_groups = {
- linux = {
- name = "worker-group-linux"
- instance_type = "m5.large"
- platform = "linux"
- asg_desired_capacity = 2
- },
- windows = {
- name = "worker-group-windows"
- instance_type = "m5.large"
- platform = "windows"
- asg_desired_capacity = 1
- },
- }
-```
-
-4. With `kubectl get nodes` you can see that the cluster has mixed (Linux/Windows) node support.
-
-## Worker nodes with labels do not join a 1.16+ cluster
-
-Starting with Kubernetes 1.16, kubelet restricts which labels in the `kubernetes.io` namespace can be applied to nodes.
-
-Older configurations used labels like `kubernetes.io/lifecycle=spot` and this is no longer allowed. Use `node.kubernetes.io/lifecycle=spot` instead.
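-
-For example, using the worker group bootstrap arguments, a minimal sketch mirroring the spot instance docs:
-
-```hcl
-worker_groups = {
-  spot = {
-    bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
-  }
-}
-```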
-
-Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
-
-## I'm using both AWS-managed node groups and self-managed worker groups, and pods scheduled on the AWS-managed node groups are unable to resolve DNS (even communication between pods fails)
-
-This happens because CoreDNS can be scheduled on self-managed worker groups and, by default, the Terraform module doesn't create security group rules to allow communication between pods scheduled on self-managed worker groups and AWS-managed node groups.
-
-You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create the required rules.
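-
-A minimal sketch (other required module arguments omitted):
-
-```hcl
-module "eks" {
-  source = "terraform-aws-modules/eks/aws"
-  # ... other configuration ...
-
-  worker_create_cluster_primary_security_group_rules = true
-}
-```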
-
-## Dedicated control plane subnets
-
-[AWS recommends](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) creating dedicated subnets for the network interfaces EKS creates (the control plane). The module fully supports this approach. To set it up, add the worker `subnet_ids` to the `workers_group_defaults` map, or set `subnet_ids` directly in a worker group definition.
-
-```hcl
-module "eks" {
- source = "terraform-aws-modules/eks/aws"
-
- cluster_version = "1.21"
- cluster_name = "my-cluster"
- vpc_id = "vpc-1234556abcdef"
- subnet_ids = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
-
- workers_group_defaults = {
- subnet_ids = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
- }
-
- worker_groups = {
- one = {
- instance_type = "m4.large"
- asg_max_size = 5
- },
- two = {
- name = "worker-group-2"
- subnet_ids = ["subnet-qwer123"]
- instance_type = "t3.medium"
- asg_desired_capacity = 1
- public_ip = true
- ebs_optimized = true
- }
- }
-}
-```
diff --git a/docs/spot-instances.md b/docs/spot-instances.md
deleted file mode 100644
index d9be09eaa7..0000000000
--- a/docs/spot-instances.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Using spot instances
-
-Spot instances usually cost around 30-70% less than on-demand instances, so using them for your EKS workloads can save a lot of money. However, they require some special considerations, as they can be terminated with only two minutes' warning.
-
-You need to install a daemonset to catch the two-minute warning before termination, ensuring the node is gracefully drained beforehand. You can install the [k8s-spot-termination-handler](https://github.com/kube-aws/kube-spot-termination-notice-handler) for this. There's a [Helm chart](https://github.com/helm/charts/tree/master/stable/k8s-spot-termination-handler):
-
-```shell
-helm install stable/k8s-spot-termination-handler --namespace kube-system
-```
-
-The following examples each include at least one worker group that uses on-demand instances. This worker group has an added node label that can be used in scheduling. It could be used to schedule any workload not suitable for spot instances, and it is important for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), which might end up unscheduled when spot instances are terminated. You can add this to the values of the [cluster-autoscaler helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart):
-
-```yaml
-nodeSelector:
- kubernetes.io/lifecycle: normal
-```
-
-Notes:
-
-- The `spot_price` is set to the on-demand price so that the spot instances will run as long as they are cheaper.
-- It's best to have a broad range of instance types to ensure there are always some instances to run when prices fluctuate.
-- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
-- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.
-
-## Using Launch Templates
-
-Only launch templates are supported in this module; launch configuration support has been deprecated and removed:
-
-```hcl
- worker_groups = {
-    one = {
- name = "spot-1"
- override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
- spot_instance_pools = 4
- asg_max_size = 5
- asg_desired_capacity = 5
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
- public_ip = true
- },
- }
-```
-
-## Using Launch Templates With Both Spot and On Demand
-
-Example launch template to launch 2 on-demand instances of type m5.large, with the ability to scale up using spot instances and on-demand instances. The `node.kubernetes.io/lifecycle` node label will be set to the value queried from the EC2 metadata service: either "on-demand" or "spot".
-
-`on_demand_percentage_above_base_capacity` is set to 25, so 1 in 4 new nodes added when autoscaling will be an on-demand instance. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).
-
-```hcl
- worker_groups = {
- one = {
- name = "mixed-demand-spot"
- override_instance_types = ["m5.large", "m5a.large", "m4.large"]
- root_encrypted = true
- root_volume_size = 50
-
- asg_min_size = 2
- asg_desired_capacity = 2
- on_demand_base_capacity = 3
- on_demand_percentage_above_base_capacity = 25
- asg_max_size = 20
- spot_instance_pools = 3
-
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`'"
- }
- }
-```
-
-## Important Notes
-
-An issue with the cluster-autoscaler: https://github.com/kubernetes/autoscaler/issues/1133
-
-AWS has released its own termination handler: https://github.com/aws/aws-node-termination-handler
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 729617344e..2960d9b692 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -1,5 +1,20 @@
# EKS Managed Node Group Module
+Configuration in this directory creates an EKS Managed Node Group along with an IAM role, security group, and launch template.
+
+## Usage
+
+To run this module you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+# TODO - Update Notes vvv
+
+Note that this module may create resources which cost money. Run `terraform destroy` when you don't need these resources.
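+
+As a sketch of how a node group is defined through the parent module (the sizing keys here are illustrative and may differ from the final variable names while this refactor is in progress):
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks/aws"
+
+  cluster_name    = "my-cluster"
+  cluster_version = "1.21"
+
+  eks_managed_node_groups = {
+    default = {
+      instance_types = ["m5.large"]
+
+      # Hypothetical sizing keys, for illustration only
+      min_size     = 1
+      max_size     = 3
+      desired_size = 2
+    }
+  }
+}
+```
+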
# User Data Configurations
@@ -31,63 +46,6 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
#
(optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151) then the default user-data for bootstrapping a cluster is merged in the copy.
-## Node Groups' IAM Role
-
-The role ARN specified in `var.default_iam_role_arn` will be used by default. In a simple configuration this will be the worker role created by the parent module.
-
-`iam_role_arn` must be specified in either `var.node_groups_defaults` or `var.node_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent.
-
-## `node_groups` and `node_groups_defaults` keys
-`node_groups_defaults` is a map that can take the below keys. Values will be used if not specified in individual node groups.
-
-`node_groups` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_node_group` name. Inner map can take the below values.
-
-| Name | Description | Type | If unset |
-|------|-------------|:----:|:-----:|
-| additional\_tags | Additional tags to apply to node group | map(string) | Only `var.tags` applied |
-| ami\_release\_version | AMI version of workers | string | Provider default behavior |
-| ami\_type | AMI Type. See Terraform or AWS docs | string | Provider default behavior |
-| ami\_id | ID of custom AMI. If you use a custom AMI, you need to set `ami_is_eks_optimized` | string | Provider default behavior |
-| ami\_is\_eks\_optimized | If the custom AMI is an EKS optimised image, ignored if `ami_id` is not set. If this is `true` then `bootstrap.sh` is called automatically (max pod logic needs to be manually set), if this is `false` you need to provide all the node configuration in `pre_userdata` | bool | `true` |
-| capacity\_type | Type of instance capacity to provision. Options are `ON_DEMAND` and `SPOT` | string | Provider default behavior |
-| create_launch_template | Create and use a default launch template | bool | `false` |
-| desired\_capacity | Desired number of workers | number | `var.workers_group_defaults[asg_desired_capacity]` |
-| disk\_encrypted | Whether the root disk will be encrypted. Requires `create_launch_template` to be `true` and `disk_kms_key_id` to be set | bool | false |
-| disk\_kms\_key\_id | KMS Key used to encrypt the root disk. Requires both `create_launch_template` and `disk_encrypted` to be `true` | string | "" |
-| disk\_size | Workers' disk size | number | Provider default behavior |
-| disk\_type | Workers' disk type. Requires `create_launch_template` to be `true` | string | Provider default behavior |
-| disk\_throughput | Workers' disk throughput. Requires `create_launch_template` to be `true` and `disk_type` to be `gp3` | number | Provider default behavior |
-| disk\_iops | Workers' disk IOPS. Requires `create_launch_template` to be `true` and `disk_type` to be `gp3` | number | Provider default behavior |
-| ebs\_optimized | Enables/disables EBS optimization. Requires `create_launch_template` to be `true` | bool | `true` if the defined `instance\_types` are not present in `var.ebs\_optimized\_not\_supported` |
-| enable_monitoring | Enables/disables detailed monitoring. Requires `create_launch_template` to be `true` | bool | `true` |
-| eni_delete | Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying) | bool | `true` |
-| force\_update\_version | Force version update if existing pods are unable to be drained due to a pod disruption budget issue. | bool | Provider default behavior |
-| iam\_role\_arn | IAM role ARN for workers | string | `var.default_iam_role_arn` |
-| instance\_types | Node group's instance type(s). Multiple types can be specified when `capacity_type="SPOT"`. | list | `[var.workers_group_defaults[instance_type]]` |
-| k8s\_labels | Kubernetes labels | map(string) | No labels applied |
-| key\_name | Key name for workers. Set to empty string to disable remote access | string | `var.workers_group_defaults[key_name]` |
-| bootstrap_env | Provide environment variables to customise [bootstrap.sh](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh). Requires `create_launch_template` to be `true` | map(string) | `{}` |
-| kubelet_extra_args | Extra arguments for kubelet; this is automatically merged with `labels`. Requires `create_launch_template` to be `true` | string | "" |
-| launch_template_id | The ID of an aws_launch_template to use | string | No LT used |
-| launch\_template_version | The version of the LT to use | string | none |
-| max\_capacity | Max number of workers | number | `var.workers_group_defaults[asg_max_size]` |
-| min\_capacity | Min number of workers | number | `var.workers_group_defaults[asg_min_size]` |
-| update_config.max\_unavailable\_percentage | Max percentage of unavailable nodes during update. (e.g. 25, 50, etc) | number | `null` if `update_config.max_unavailable` is set |
-| update_config.max\_unavailable | Max number of unavailable nodes during update | number | `null` if `update_config.max_unavailable_percentage` is set |
-| name | Name of the node group. If you don't really need this, we recommend using `name_prefix` instead. | string | Will use the autogenerated name prefix |
-| name_prefix | Name prefix of the node group | string | Auto generated |
-| pre_userdata | Userdata to prepend to the default userdata. Requires `create_launch_template` to be `true` | string | "" |
-| public_ip | Associate a public IP address with a worker. Requires `create_launch_template` to be `true` | string | `false` |
-| source\_security\_group\_ids | Source security groups for remote access to workers | list(string) | If key\_name is specified: THE REMOTE ACCESS WILL BE OPENED TO THE WORLD |
-| subnets | Subnets to contain workers | list(string) | `var.workers_group_defaults[subnets]` |
-| version | Kubernetes version | string | Provider default behavior |
-| taints | Kubernetes node taints | list(map) | empty |
-| timeouts | A map of timeouts for create/update/delete operations. | `map(string)` | Provider default behavior |
-| update_default_version | Whether or not to set the new launch template version as the default | bool | `true` |
-| metadata_http_endpoint | The state of the instance metadata service. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_endpoint]` |
-| metadata_http_tokens | If session tokens are required. Requires `create_launch_template` to be `true` | string | `var.workers_group_defaults[metadata_http_tokens]` |
-| metadata_http_put_response_hop_limit | The desired HTTP PUT response hop limit for instance metadata requests. Requires `create_launch_template` to be `true` | number | `var.workers_group_defaults[metadata_http_put_response_hop_limit]` |
-
## Requirements
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index 89b6dc40c2..44b0b9e551 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -1,14 +1,16 @@
-# EKS `fargate` submodule
+# EKS Fargate Profile Module
-Helper submodule to create and manage resources related to `aws_eks_fargate_profile`.
+Configuration in this directory creates an EKS Fargate Profile.
-## `fargate_profile` keys
+## Usage
-`fargate_profile` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_fargate_profile` name. Inner map can take the below values.
+To run this module you need to execute:
-## Example
-
-See example code in `examples/fargate`.
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
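+
+As a sketch, a profile is typically defined through the parent module's `fargate_profiles` map; the selector shape follows the `aws_eks_fargate_profile` resource (values are illustrative):
+
+```hcl
+fargate_profiles = {
+  default = {
+    name = "default"
+    selectors = [
+      {
+        namespace = "default"
+      }
+    ]
+  }
+}
+```
+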
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 740ed6603e..e44f48f68e 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -1,5 +1,17 @@
# Self Managed Node Group Module
+Configuration in this directory creates a Self Managed Node Group (AutoScaling Group) along with an IAM role, security group, and launch template.
+
+## Usage
+
+To run this module you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
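+As a sketch of defining a group through the parent module's `self_managed_node_groups` map (`desired_capacity` is this module's input at this point; the other sizing keys are illustrative):
+
+```hcl
+self_managed_node_groups = {
+  one = {
+    instance_type    = "m5.large"
+    desired_capacity = 2
+
+    # Hypothetical sizing keys, for illustration only
+    min_size = 1
+    max_size = 3
+  }
+}
+```
+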
## Requirements
diff --git a/workers.tf b/node_groups.tf
similarity index 100%
rename from workers.tf
rename to node_groups.tf
From be9dfd92b45a61fa1c84980e4e2db4396d16f772 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 12 Nov 2021 21:51:00 -0500
Subject: [PATCH 26/83] feat: add back in the group default variables
---
README.md | 181 +++++-----
modules/self-managed-node-group/README.md | 2 +-
modules/self-managed-node-group/main.tf | 4 +-
modules/self-managed-node-group/variables.tf | 2 +-
node_groups.tf | 332 +++++++++----------
variables.tf | 18 +
6 files changed, 279 insertions(+), 260 deletions(-)
diff --git a/README.md b/README.md
index 51b1635609..ec19e5329c 100644
--- a/README.md
+++ b/README.md
@@ -387,113 +387,114 @@ Report issues/questions/feature requests on in the [issues](https://github.com/t
Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
-
## Requirements
-| Name | Version |
-| ------------------------------------------------------------------------ | --------- |
-| [terraform](#requirement_terraform) | >= 0.13.1 |
-| [aws](#requirement_aws) | >= 3.56.0 |
-| [tls](#requirement_tls) | >= 2.2.0 |
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.56.0 |
+| [tls](#requirement\_tls) | >= 2.2.0 |
## Providers
-| Name | Version |
-| ------------------------------------------------ | --------- |
-| [aws](#provider_aws) | >= 3.56.0 |
-| [tls](#provider_tls) | >= 2.2.0 |
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.56.0 |
+| [tls](#provider\_tls) | >= 2.2.0 |
## Modules
-| Name | Source | Version |
-| -------------------------------------------------------------------------------------------------------- | --------------------------------- | ------- |
-| [eks_managed_node_group](#module_eks_managed_node_group) | ./modules/eks-managed-node-group | n/a |
-| [fargate_profile](#module_fargate_profile) | ./modules/fargate-profile | n/a |
-| [self_managed_node_group](#module_self_managed_node_group) | ./modules/self-managed-node-group | n/a |
+| Name | Source | Version |
+|------|--------|---------|
+| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a |
+| [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a |
+| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
## Resources
-| Name | Type |
-| -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
-| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
-| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
+| Name | Type |
+|------|------|
+| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
+| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
+| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
+| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
## Inputs
-| Name | Description | Type | Default | Required |
-| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | --------------------------------- | :------: |
-| [cluster_create_endpoint_private_access_sg_rule](#input_cluster_create_endpoint_private_access_sg_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster_endpoint_private_access_sg' should be provided | `bool` | `false` | no |
-| [cluster_egress_cidrs](#input_cluster_egress_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster_enabled_log_types](#input_cluster_enabled_log_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
-| [cluster_encryption_config](#input_cluster_encryption_config) | Configuration block with encryption configuration for the cluster. See examples/secrets_encryption/main.tf for example format | | `[]` | no |
-| [cluster_endpoint_private_access](#input_cluster_endpoint_private_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
-| [cluster_endpoint_private_access_cidrs](#input_cluster_endpoint_private_access_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
-| [cluster_endpoint_private_access_sg](#input_cluster_endpoint_private_access_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
-| [cluster_endpoint_public_access](#input_cluster_endpoint_public_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
-| [cluster_endpoint_public_access_cidrs](#input_cluster_endpoint_public_access_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
-| [cluster_iam_role_arn](#input_cluster_iam_role_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
-| [cluster_iam_role_name](#input_cluster_iam_role_name) | Name to use on cluster role created | `string` | `null` | no |
-| [cluster_iam_role_path](#input_cluster_iam_role_path) | Cluster IAM role path | `string` | `null` | no |
-| [cluster_iam_role_permissions_boundary](#input_cluster_iam_role_permissions_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
-| [cluster_iam_role_tags](#input_cluster_iam_role_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
-| [cluster_iam_role_use_name_prefix](#input_cluster_iam_role_use_name_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [cluster_log_kms_key_id](#input_cluster_log_kms_key_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
-| [cluster_log_retention_in_days](#input_cluster_log_retention_in_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
-| [cluster_name](#input_cluster_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
-| [cluster_security_group_id](#input_cluster_security_group_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
-| [cluster_security_group_name](#input_cluster_security_group_name) | Name to use on cluster security group created | `string` | `null` | no |
-| [cluster_security_group_tags](#input_cluster_security_group_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
-| [cluster_security_group_use_name_prefix](#input_cluster_security_group_use_name_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `string` | `true` | no |
-| [cluster_service_ipv4_cidr](#input_cluster_service_ipv4_cidr) | Service IPv4 CIDR for the Kubernetes cluster | `string` | `null` | no |
-| [cluster_tags](#input_cluster_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
-| [cluster_timeouts](#input_cluster_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
-| [cluster_version](#input_cluster_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
-| [create](#input_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| [create_cluster_iam_role](#input_create_cluster_iam_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create_cluster_security_group](#input_create_cluster_security_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
-| [eks_managed_node_groups](#input_eks_managed_node_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
-| [enable_irsa](#input_enable_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
-| [fargate_profiles](#input_fargate_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
-| [openid_connect_audiences](#input_openid_connect_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
-| [self_managed_node_groups](#input_self_managed_node_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
-| [subnet_ids](#input_subnet_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
-| [tags](#input_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
-| [vpc_id](#input_vpc_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or `cluster_endpoint_private_access_sg` should be provided | `bool` | `false` | no |
+| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
+| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | | `[]` | no |
+| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
+| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
+| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
+| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
+| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `[ "0.0.0.0/0" ]` | no |
+| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
+| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
+| [cluster\_iam\_role\_path](#input\_cluster\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
+| [cluster\_iam\_role\_permissions\_boundary](#input\_cluster\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
+| [cluster\_iam\_role\_tags](#input\_cluster\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
+| [cluster\_iam\_role\_use\_name\_prefix](#input\_cluster\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
+| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
+| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
+| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
+| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `string` | `true` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | Service IPv4 CIDR for the Kubernetes cluster | `string` | `null` | no |
+| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
+| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
+| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
+| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
+| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
+| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
+| [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no |
+| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
+| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
+| [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no |
+| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
+| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
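+
+The new `*_defaults` maps accept the same keys as the corresponding group definitions and apply to every group unless overridden per group; a minimal sketch (keys and values are illustrative):
+
+```hcl
+eks_managed_node_group_defaults = {
+  instance_types = ["m5.large"]
+}
+
+eks_managed_node_groups = {
+  one = {}
+  two = {
+    instance_types = ["m5.xlarge"] # overrides the default
+  }
+}
+```
+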
## Outputs
-| Name | Description |
-| ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [cloudwatch_log_group_arn](#output_cloudwatch_log_group_arn) | Arn of cloudwatch log group created |
-| [cloudwatch_log_group_name](#output_cloudwatch_log_group_name) | Name of cloudwatch log group created |
-| [cluster_arn](#output_cluster_arn) | The Amazon Resource Name (ARN) of the cluster |
-| [cluster_certificate_authority_data](#output_cluster_certificate_authority_data) | Base64 encoded certificate data required to communicate with the cluster |
-| [cluster_endpoint](#output_cluster_endpoint) | Endpoint for your Kubernetes API server |
-| [cluster_iam_role_arn](#output_cluster_iam_role_arn) | IAM role ARN of the EKS cluster |
-| [cluster_iam_role_name](#output_cluster_iam_role_name) | IAM role name of the EKS cluster |
-| [cluster_iam_role_unique_id](#output_cluster_iam_role_unique_id) | Stable and unique string identifying the IAM role |
-| [cluster_id](#output_cluster_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
-| [cluster_oidc_issuer_url](#output_cluster_oidc_issuer_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
-| [cluster_platform_version](#output_cluster_platform_version) | Platform version for the cluster |
-| [cluster_security_group_arn](#output_cluster_security_group_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster_security_group_id](#output_cluster_security_group_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
-| [cluster_status](#output_cluster_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
-| [eks_managed_node_groups](#output_eks_managed_node_groups) | Map of attribute maps for all EKS managed node groups created |
-| [fargate_profiles](#output_fargate_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
-| [oidc_provider_arn](#output_oidc_provider_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
-| [self_managed_node_groups](#output_self_managed_node_groups) | Map of attribute maps for all self managed node groups created |
-
+| Name | Description |
+|------|-------------|
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
## License
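A quick aside on consuming the outputs tabled above: the sketch below is illustrative only, not part of the patch. It assumes the module is instantiated as `module "eks"` and that the `hashicorp/local` provider is available, and shows how the endpoint and certificate-authority outputs might be rendered into a kubeconfig.

```hcl
# Illustrative sketch only: render a minimal kubeconfig from the module
# outputs documented above. Resource and file names are hypothetical.
resource "local_file" "kubeconfig" {
  filename = "${path.module}/kubeconfig"
  content  = <<-EOT
    apiVersion: v1
    kind: Config
    clusters:
      - name: ${module.eks.cluster_id}
        cluster:
          server: ${module.eks.cluster_endpoint}
          certificate-authority-data: ${module.eks.cluster_certificate_authority_data}
  EOT
}
```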
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index e44f48f68e..e40250ea94 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -74,7 +74,7 @@ No modules.
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
-| [desired\_capacity](#input\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `null` | no |
+| [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `null` | no |
| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 431912344d..6d8ad17e16 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -235,7 +235,7 @@ resource "aws_autoscaling_group" "this" {
min_size = var.min_size
max_size = var.max_size
- desired_capacity = var.desired_capacity
+ desired_capacity = var.desired_size
capacity_rebalance = var.capacity_rebalance
min_elb_capacity = var.min_elb_capacity
wait_for_elb_capacity = var.wait_for_elb_capacity
@@ -382,7 +382,7 @@ resource "aws_autoscaling_schedule" "this" {
min_size = lookup(each.value, "min_size", null)
max_size = lookup(each.value, "max_size", null)
- desired_capacity = lookup(each.value, "desired_capacity", null)
+ desired_capacity = lookup(each.value, "desired_size", null)
start_time = lookup(each.value, "start_time", null)
end_time = lookup(each.value, "end_time", null)
time_zone = lookup(each.value, "time_zone", null)
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 94777de1c4..9599d28627 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -55,7 +55,7 @@ variable "max_size" {
default = null
}
-variable "desired_capacity" {
+variable "desired_size" {
description = "The number of Amazon EC2 instances that should be running in the autoscaling group"
type = number
default = null
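The rename above mirrors the `desired_size` naming used by EKS managed node groups (the removed `# to be consisted with EKS MNG` comment later in this patch hints at the motivation). A hedged sketch of a caller after this change, with required cluster and networking arguments elided for brevity:

```hcl
# Hypothetical caller; values are illustrative and other required
# arguments (cluster name, subnets, etc.) are omitted.
module "workers" {
  source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"

  name         = "example-workers"
  min_size     = 1
  max_size     = 3
  desired_size = 2 # was `desired_capacity` before this patch
}
```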
diff --git a/node_groups.tf b/node_groups.tf
index 2ef75569cf..ad8de2a5e6 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -1,5 +1,5 @@
################################################################################
-# Fargate
+# Fargate Profile
################################################################################
module "fargate_profile" {
@@ -9,22 +9,22 @@ module "fargate_profile" {
# Fargate Profile
cluster_name = aws_eks_cluster.this[0].name
- fargate_profile_name = try(each.value.fargate_profile_name, each.key, true)
- subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
- selectors = try(each.value.selectors, {})
- timeouts = try(each.value.timeouts, {})
+ fargate_profile_name = try(each.value.fargate_profile_name, each.key)
+ subnet_ids = try(each.value.subnet_ids, var.fargate_profile_defaults.subnet_ids, var.subnet_ids)
+ selectors = try(each.value.selectors, var.fargate_profile_defaults.selectors, {})
+ timeouts = try(each.value.timeouts, var.fargate_profile_defaults.timeouts, {})
# IAM role
- create_iam_role = try(each.value.create_iam_role, true)
- iam_role_arn = try(each.value.iam_role_arn, null)
- iam_role_name = try(each.value.iam_role_name, null)
- iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
- iam_role_path = try(each.value.iam_role_path, null)
- iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
- iam_role_tags = try(each.value.iam_role_tags, {})
- iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
-
- tags = merge(var.tags, try(each.value.tags, {}))
+ create_iam_role = try(each.value.create_iam_role, var.fargate_profile_defaults.create_iam_role, true)
+ iam_role_arn = try(each.value.iam_role_arn, var.fargate_profile_defaults.iam_role_arn, null)
+ iam_role_name = try(each.value.iam_role_name, var.fargate_profile_defaults.iam_role_name, null)
+ iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, var.fargate_profile_defaults.iam_role_use_name_prefix, true)
+ iam_role_path = try(each.value.iam_role_path, var.fargate_profile_defaults.iam_role_path, null)
+ iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.fargate_profile_defaults.iam_role_permissions_boundary, null)
+ iam_role_tags = try(each.value.iam_role_tags, var.fargate_profile_defaults.iam_role_tags, {})
+ iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, [])
+
+ tags = merge(var.tags, try(each.value.tags, var.fargate_profile_defaults.tags, {}))
}
################################################################################
@@ -40,85 +40,85 @@ module "eks_managed_node_group" {
# EKS Managed Node Group
name = try(each.value.name, each.key)
- use_name_prefix = try(each.value.use_name_prefix, false)
+ use_name_prefix = try(each.value.use_name_prefix, var.eks_managed_node_group_defaults.use_name_prefix, false)
- subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
+ subnet_ids = try(each.value.subnet_ids, var.eks_managed_node_group_defaults.subnet_ids, var.subnet_ids)
- min_size = try(each.value.min_size, 1)
- max_size = try(each.value.max_size, 3)
- desired_size = try(each.value.desired_size, 1)
+ min_size = try(each.value.min_size, var.eks_managed_node_group_defaults.min_size, 1)
+ max_size = try(each.value.max_size, var.eks_managed_node_group_defaults.max_size, 3)
+ desired_size = try(each.value.desired_size, var.eks_managed_node_group_defaults.desired_size, 1)
- ami_id = try(each.value.ami_id, null)
- ami_type = try(each.value.ami_type, null)
- ami_release_version = try(each.value.ami_release_version, null)
+ ami_id = try(each.value.ami_id, var.eks_managed_node_group_defaults.ami_id, null)
+ ami_type = try(each.value.ami_type, var.eks_managed_node_group_defaults.ami_type, null)
+ ami_release_version = try(each.value.ami_release_version, var.eks_managed_node_group_defaults.ami_release_version, null)
- capacity_type = try(each.value.capacity_type, null)
- disk_size = try(each.value.disk_size, null)
- force_update_version = try(each.value.force_update_version, null)
- instance_types = try(each.value.instance_types, null)
- labels = try(each.value.labels, null)
- cluster_version = try(each.value.cluster_version, var.cluster_version)
+ capacity_type = try(each.value.capacity_type, var.eks_managed_node_group_defaults.capacity_type, null)
+ disk_size = try(each.value.disk_size, var.eks_managed_node_group_defaults.disk_size, null)
+ force_update_version = try(each.value.force_update_version, var.eks_managed_node_group_defaults.force_update_version, null)
+ instance_types = try(each.value.instance_types, var.eks_managed_node_group_defaults.instance_types, null)
+ labels = try(each.value.labels, var.eks_managed_node_group_defaults.labels, null)
+ cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, var.cluster_version)
- remote_access = try(each.value.remote_access, {})
- taints = try(each.value.taints, {})
- update_config = try(each.value.update_config, {})
- timeouts = try(each.value.timeouts, {})
+ remote_access = try(each.value.remote_access, var.eks_managed_node_group_defaults.remote_access, {})
+ taints = try(each.value.taints, var.eks_managed_node_group_defaults.taints, {})
+ update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, {})
+ timeouts = try(each.value.timeouts, var.eks_managed_node_group_defaults.timeouts, {})
# User data
- custom_user_data = try(each.value.custom_user_data, null)
- custom_ami_is_eks_optimized = try(each.value.custom_ami_is_eks_optimized, true)
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, null)
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, null)
- cluster_dns_ip = try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv4_cidr, "")
- pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, "")
- post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, "")
- bootstrap_extra_args = try(each.value.bootstrap_extra_args, "")
+ custom_user_data = try(each.value.custom_user_data, var.eks_managed_node_group_defaults.custom_user_data, null)
+ custom_ami_is_eks_optimized = try(each.value.custom_ami_is_eks_optimized, var.eks_managed_node_group_defaults.custom_ami_is_eks_optimized, true)
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.eks_managed_node_group_defaults.cluster_endpoint, null)
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.eks_managed_node_group_defaults.cluster_auth_base64, null)
+ cluster_dns_ip = try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv4_cidr, var.eks_managed_node_group_defaults.cluster_dns_ip, "")
+ pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "")
+ post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.eks_managed_node_group_defaults.post_bootstrap_user_data, "")
+ bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.eks_managed_node_group_defaults.bootstrap_extra_args, "")
# Launch Template
- create_launch_template = try(each.value.create_launch_template, false)
- launch_template_name = try(each.value.launch_template_name, null)
- launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, true)
- launch_template_version = try(each.value.launch_template_version, null)
- description = try(each.value.description, null)
-
- ebs_optimized = try(each.value.ebs_optimized, null)
- key_name = try(each.value.key_name, null)
-
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, [])
-
- default_version = try(each.value.default_version, null)
- update_default_version = try(each.value.update_default_version, null)
- disable_api_termination = try(each.value.disable_api_termination, null)
- instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, null)
- kernel_id = try(each.value.kernel_id, null)
- ram_disk_id = try(each.value.ram_disk_id, null)
-
- block_device_mappings = try(each.value.block_device_mappings, [])
- capacity_reservation_specification = try(each.value.capacity_reservation_specification, null)
- cpu_options = try(each.value.cpu_options, null)
- credit_specification = try(each.value.credit_specification, null)
- elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, null)
- elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, null)
- enclave_options = try(each.value.enclave_options, null)
- hibernation_options = try(each.value.hibernation_options, null)
- instance_market_options = try(each.value.instance_market_options, null)
- license_specifications = try(each.value.license_specifications, null)
- metadata_options = try(each.value.metadata_options, null)
- enable_monitoring = try(each.value.enable_monitoring, null)
- network_interfaces = try(each.value.network_interfaces, [])
- placement = try(each.value.placement, null)
+ create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, false)
+ launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, null)
+ launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true)
+ launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null)
+ description = try(each.value.description, var.eks_managed_node_group_defaults.description, null)
+
+ ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null)
+ key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
+
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])
+
+ default_version = try(each.value.default_version, var.eks_managed_node_group_defaults.default_version, null)
+ update_default_version = try(each.value.update_default_version, var.eks_managed_node_group_defaults.update_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null)
+ instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.eks_managed_node_group_defaults.instance_initiated_shutdown_behavior, null)
+ kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null)
+
+ block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, [])
+ capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.eks_managed_node_group_defaults.capacity_reservation_specification, null)
+ cpu_options = try(each.value.cpu_options, var.eks_managed_node_group_defaults.cpu_options, null)
+ credit_specification = try(each.value.credit_specification, var.eks_managed_node_group_defaults.credit_specification, null)
+ elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, var.eks_managed_node_group_defaults.elastic_gpu_specifications, null)
+ elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.eks_managed_node_group_defaults.elastic_inference_accelerator, null)
+ enclave_options = try(each.value.enclave_options, var.eks_managed_node_group_defaults.enclave_options, null)
+ hibernation_options = try(each.value.hibernation_options, var.eks_managed_node_group_defaults.hibernation_options, null)
+ instance_market_options = try(each.value.instance_market_options, var.eks_managed_node_group_defaults.instance_market_options, null)
+ license_specifications = try(each.value.license_specifications, var.eks_managed_node_group_defaults.license_specifications, null)
+ metadata_options = try(each.value.metadata_options, var.eks_managed_node_group_defaults.metadata_options, null)
+ enable_monitoring = try(each.value.enable_monitoring, var.eks_managed_node_group_defaults.enable_monitoring, null)
+ network_interfaces = try(each.value.network_interfaces, var.eks_managed_node_group_defaults.network_interfaces, [])
+ placement = try(each.value.placement, var.eks_managed_node_group_defaults.placement, null)
# IAM role
- create_iam_role = try(each.value.create_iam_role, true)
- iam_role_arn = try(each.value.iam_role_arn, null)
- iam_role_name = try(each.value.iam_role_name, null)
- iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
- iam_role_path = try(each.value.iam_role_path, null)
- iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
- iam_role_tags = try(each.value.iam_role_tags, {})
- iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
-
- tags = merge(var.tags, try(each.value.tags, {}))
+ create_iam_role = try(each.value.create_iam_role, var.eks_managed_node_group_defaults.create_iam_role, true)
+ iam_role_arn = try(each.value.iam_role_arn, var.eks_managed_node_group_defaults.iam_role_arn, null)
+ iam_role_name = try(each.value.iam_role_name, var.eks_managed_node_group_defaults.iam_role_name, null)
+ iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, var.eks_managed_node_group_defaults.iam_role_use_name_prefix, true)
+ iam_role_path = try(each.value.iam_role_path, var.eks_managed_node_group_defaults.iam_role_path, null)
+ iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.eks_managed_node_group_defaults.iam_role_permissions_boundary, null)
+ iam_role_tags = try(each.value.iam_role_tags, var.eks_managed_node_group_defaults.iam_role_tags, {})
+ iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, [])
+
+ tags = merge(var.tags, try(each.value.tags, var.eks_managed_node_group_defaults.tags, {}))
}
################################################################################
@@ -134,95 +134,95 @@ module "self_managed_node_group" {
# Autoscaling Group
name = try(each.value.name, each.key)
- use_name_prefix = try(each.value.use_name_prefix, false)
+ use_name_prefix = try(each.value.use_name_prefix, var.self_managed_node_group_defaults.use_name_prefix, false)
launch_template_name = try(each.value.launch_template_name, each.key)
- launch_template_version = try(each.value.launch_template_version, null)
- availability_zones = try(each.value.availability_zones, null)
- subnet_ids = try(each.value.subnet_ids, var.subnet_ids)
-
- min_size = try(each.value.min_size, 0)
- max_size = try(each.value.max_size, 0)
- desired_capacity = try(each.value.desired_size, 0) # to be consisted with EKS MNG
- capacity_rebalance = try(each.value.capacity_rebalance, null)
- min_elb_capacity = try(each.value.min_elb_capacity, null)
- wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, null)
- wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, null)
- default_cooldown = try(each.value.default_cooldown, null)
- protect_from_scale_in = try(each.value.protect_from_scale_in, null)
-
- target_group_arns = try(each.value.target_group_arns, null)
- placement_group = try(each.value.placement_group, null)
- health_check_type = try(each.value.health_check_type, null)
- health_check_grace_period = try(each.value.health_check_grace_period, null)
-
- force_delete = try(each.value.force_delete, null)
- termination_policies = try(each.value.termination_policies, null)
- suspended_processes = try(each.value.suspended_processes, null)
- max_instance_lifetime = try(each.value.max_instance_lifetime, null)
-
- enabled_metrics = try(each.value.enabled_metrics, null)
- metrics_granularity = try(each.value.metrics_granularity, null)
- service_linked_role_arn = try(each.value.service_linked_role_arn, null)
-
- initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, [])
- instance_refresh = try(each.value.instance_refresh, null)
- use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, false)
- warm_pool = try(each.value.warm_pool, null)
-
- create_schedule = try(each.value.create_schedule, false)
- schedules = try(each.value.schedules, null)
-
- delete_timeout = try(each.value.delete_timeout, null)
+ launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null)
+ availability_zones = try(each.value.availability_zones, var.self_managed_node_group_defaults.availability_zones, null)
+ subnet_ids = try(each.value.subnet_ids, var.self_managed_node_group_defaults.subnet_ids, var.subnet_ids)
+
+ min_size = try(each.value.min_size, var.self_managed_node_group_defaults.min_size, 1)
+ max_size = try(each.value.max_size, var.self_managed_node_group_defaults.max_size, 3)
+ desired_size = try(each.value.desired_size, var.self_managed_node_group_defaults.desired_size, 1)
+ capacity_rebalance = try(each.value.capacity_rebalance, var.self_managed_node_group_defaults.capacity_rebalance, null)
+ min_elb_capacity = try(each.value.min_elb_capacity, var.self_managed_node_group_defaults.min_elb_capacity, null)
+ wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, var.self_managed_node_group_defaults.wait_for_elb_capacity, null)
+ wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_managed_node_group_defaults.wait_for_capacity_timeout, null)
+ default_cooldown = try(each.value.default_cooldown, var.self_managed_node_group_defaults.default_cooldown, null)
+ protect_from_scale_in = try(each.value.protect_from_scale_in, var.self_managed_node_group_defaults.protect_from_scale_in, null)
+
+ target_group_arns = try(each.value.target_group_arns, var.self_managed_node_group_defaults.target_group_arns, null)
+ placement_group = try(each.value.placement_group, var.self_managed_node_group_defaults.placement_group, null)
+ health_check_type = try(each.value.health_check_type, var.self_managed_node_group_defaults.health_check_type, null)
+ health_check_grace_period = try(each.value.health_check_grace_period, var.self_managed_node_group_defaults.health_check_grace_period, null)
+
+ force_delete = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
+ termination_policies = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, null)
+ suspended_processes = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, null)
+ max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
+
+ enabled_metrics = try(each.value.enabled_metrics, var.self_managed_node_group_defaults.enabled_metrics, null)
+ metrics_granularity = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null)
+ service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_managed_node_group_defaults.service_linked_role_arn, null)
+
+ initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, [])
+ instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, null)
+ use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false)
+ warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, null)
+
+ create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, false)
+ schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, null)
+
+ delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null)
# Launch Template
- create_launch_template = try(each.value.create_launch_template, true)
- description = try(each.value.description, null)
-
- ebs_optimized = try(each.value.ebs_optimized, null)
- image_id = try(each.value.image_id, null)
- cluster_version = try(each.value.cluster_version, var.cluster_version)
- instance_type = try(each.value.instance_type, "m6i.large")
- key_name = try(each.value.key_name, null)
- user_data = try(each.value.user_data, null)
-
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, [])
-
- default_version = try(each.value.default_version, null)
- update_default_version = try(each.value.update_default_version, null)
- disable_api_termination = try(each.value.disable_api_termination, null)
- instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, null)
- kernel_id = try(each.value.kernel_id, null)
- ram_disk_id = try(each.value.ram_disk_id, null)
-
- block_device_mappings = try(each.value.block_device_mappings, [])
- capacity_reservation_specification = try(each.value.capacity_reservation_specification, null)
- cpu_options = try(each.value.cpu_options, null)
- credit_specification = try(each.value.credit_specification, null)
- elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, null)
- elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, null)
- enclave_options = try(each.value.enclave_options, null)
- hibernation_options = try(each.value.hibernation_options, null)
- instance_market_options = try(each.value.instance_market_options, null)
- license_specifications = try(each.value.license_specifications, null)
- metadata_options = try(each.value.metadata_options, null)
- enable_monitoring = try(each.value.enable_monitoring, null)
- network_interfaces = try(each.value.network_interfaces, [])
- placement = try(each.value.placement, null)
+ create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
+ description = try(each.value.description, var.self_managed_node_group_defaults.description, null)
+
+ ebs_optimized = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null)
+ image_id = try(each.value.image_id, var.self_managed_node_group_defaults.image_id, null)
+ cluster_version = try(each.value.cluster_version, var.self_managed_node_group_defaults.cluster_version, var.cluster_version)
+ instance_type = try(each.value.instance_type, var.self_managed_node_group_defaults.instance_type, "m6i.large")
+ key_name = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null)
+ user_data = try(each.value.user_data, var.self_managed_node_group_defaults.user_data, null)
+
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])
+
+ default_version = try(each.value.default_version, var.self_managed_node_group_defaults.default_version, null)
+ update_default_version = try(each.value.update_default_version, var.self_managed_node_group_defaults.update_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null)
+ instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null)
+ kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null)
+
+ block_device_mappings = try(each.value.block_device_mappings, var.self_managed_node_group_defaults.block_device_mappings, [])
+ capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_managed_node_group_defaults.capacity_reservation_specification, null)
+ cpu_options = try(each.value.cpu_options, var.self_managed_node_group_defaults.cpu_options, null)
+ credit_specification = try(each.value.credit_specification, var.self_managed_node_group_defaults.credit_specification, null)
+ elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, var.self_managed_node_group_defaults.elastic_gpu_specifications, null)
+ elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.self_managed_node_group_defaults.elastic_inference_accelerator, null)
+ enclave_options = try(each.value.enclave_options, var.self_managed_node_group_defaults.enclave_options, null)
+ hibernation_options = try(each.value.hibernation_options, var.self_managed_node_group_defaults.hibernation_options, null)
+ instance_market_options = try(each.value.instance_market_options, var.self_managed_node_group_defaults.instance_market_options, null)
+ license_specifications = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, null)
+ metadata_options = try(each.value.metadata_options, var.self_managed_node_group_defaults.metadata_options, null)
+ enable_monitoring = try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, null)
+ network_interfaces = try(each.value.network_interfaces, var.self_managed_node_group_defaults.network_interfaces, [])
+ placement = try(each.value.placement, var.self_managed_node_group_defaults.placement, null)
# IAM role
- create_iam_instance_profile = try(each.value.create_iam_instance_profile, true)
- iam_instance_profile_arn = try(each.value.iam_instance_profile_arn, null)
- iam_role_name = try(each.value.iam_role_name, null)
- iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, true)
- iam_role_path = try(each.value.iam_role_path, null)
- iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, null)
- iam_role_tags = try(each.value.iam_role_tags, {})
- iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, true)
- iam_role_additional_policies = try(each.value.iam_role_additional_policies, [])
-
- tags = merge(var.tags, try(each.value.tags, {}))
- propagate_tags = try(each.value.propagate_tags, [])
+ create_iam_instance_profile = try(each.value.create_iam_instance_profile, var.self_managed_node_group_defaults.create_iam_instance_profile, true)
+ iam_instance_profile_arn = try(each.value.iam_instance_profile_arn, var.self_managed_node_group_defaults.iam_instance_profile_arn, null)
+ iam_role_name = try(each.value.iam_role_name, var.self_managed_node_group_defaults.iam_role_name, null)
+ iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, var.self_managed_node_group_defaults.iam_role_use_name_prefix, true)
+ iam_role_path = try(each.value.iam_role_path, var.self_managed_node_group_defaults.iam_role_path, null)
+ iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.self_managed_node_group_defaults.iam_role_permissions_boundary, null)
+ iam_role_tags = try(each.value.iam_role_tags, var.self_managed_node_group_defaults.iam_role_tags, {})
+ iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.self_managed_node_group_defaults.iam_role_attach_cni_policy, true)
+ iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, [])
+
+ tags = merge(var.tags, try(each.value.tags, var.self_managed_node_group_defaults.tags, {}))
+ propagate_tags = try(each.value.propagate_tags, var.self_managed_node_group_defaults.propagate_tags, [])
}
# ################################################################################
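Every argument in the hunks above now resolves through the same three-step fallback: the per-group value, then the shared `*_defaults` map, then a hard-coded default. Because `try()` returns the first expression that evaluates without error, and both maps are typed `any`, a missing attribute simply falls through to the next candidate. A minimal standalone illustration (hypothetical values, not part of the patch):

```hcl
# Hypothetical illustration of the try() fallback chain used in node_groups.tf.
variable "eks_managed_node_groups" {
  type    = any
  default = { blue = { name = "demo" } } # this group omits min_size
}

variable "eks_managed_node_group_defaults" {
  type    = any
  default = { min_size = 2 }
}

locals {
  # 1) per-group value, 2) shared defaults map, 3) hard-coded fallback
  blue_min_size = try(
    var.eks_managed_node_groups["blue"].min_size,
    var.eks_managed_node_group_defaults.min_size,
    1,
  )
  # => 2: the group omits min_size, so the defaults map wins over the fallback
}
```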
diff --git a/variables.tf b/variables.tf
index 040bc9232b..d9d1e4a415 100644
--- a/variables.tf
+++ b/variables.tf
@@ -232,6 +232,12 @@ variable "fargate_profiles" {
default = {}
}
+variable "fargate_profile_defaults" {
+ description = "Map of Fargate Profile default configurations"
+ type = any
+ default = {}
+}
+
################################################################################
# Self Managed Node Group
################################################################################
@@ -242,6 +248,12 @@ variable "self_managed_node_groups" {
default = {}
}
+variable "self_managed_node_group_defaults" {
+ description = "Map of self-managed node group default configurations"
+ type = any
+ default = {}
+}
+
################################################################################
# EKS Managed Node Group
################################################################################
@@ -251,3 +263,9 @@ variable "eks_managed_node_groups" {
type = any
default = {}
}
+
+variable "eks_managed_node_group_defaults" {
+ description = "Map of EKS managed node group default configurations"
+ type = any
+ default = {}
+}
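Taken together, the three new `any`-typed inputs let callers define shared settings once and override them per group. A hedged example of root-module usage (names and values are illustrative; other required arguments are elided):

```hcl
# Hypothetical usage of the new *_defaults inputs added above.
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "example"
  cluster_version = "1.21"

  eks_managed_node_group_defaults = {
    disk_size      = 50
    instance_types = ["m6i.large"]
  }

  eks_managed_node_groups = {
    default = {}                  # inherits both defaults above
    compute = { disk_size = 100 } # overrides disk_size, keeps instance_types
  }
}
```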
From 53f2816cb8a9ab0603d0158a51fd153228c61fa5 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sat, 13 Nov 2021 08:28:44 -0500
Subject: [PATCH 27/83] chore: update outputs, bump version for autoscaling
attribute additions
---
README.md | 8 +-
examples/bottlerocket/README.md | 32 +++--
examples/bottlerocket/outputs.tf | 116 ++++++++++++++++-
examples/bottlerocket/versions.tf | 6 +-
examples/complete/README.md | 28 +++-
examples/complete/main.tf | 46 ++++---
examples/complete/outputs.tf | 116 ++++++++++++++++-
examples/complete/versions.tf | 2 +-
examples/eks_managed_node_group/README.md | 26 +++-
examples/eks_managed_node_group/outputs.tf | 116 ++++++++++++++++-
examples/eks_managed_node_group/versions.tf | 4 +-
examples/fargate/README.md | 24 +++-
examples/fargate/outputs.tf | 119 ++++++++++++++++-
examples/fargate/versions.tf | 2 +-
examples/instance_refresh/README.md | 28 +++-
examples/instance_refresh/outputs.tf | 116 ++++++++++++++++-
examples/instance_refresh/versions.tf | 4 +-
examples/irsa/README.md | 28 +++-
examples/irsa/irsa.tf | 2 -
examples/irsa/outputs.tf | 123 +++++++++++++++++-
examples/irsa/versions.tf | 4 +-
examples/secrets_encryption/README.md | 24 +++-
examples/secrets_encryption/outputs.tf | 116 ++++++++++++++++-
examples/secrets_encryption/versions.tf | 2 +-
examples/self_managed_node_groups/README.md | 24 +++-
examples/self_managed_node_groups/outputs.tf | 116 ++++++++++++++++-
examples/self_managed_node_groups/versions.tf | 2 +-
modules/eks-managed-node-group/README.md | 10 +-
modules/eks-managed-node-group/variables.tf | 2 +-
modules/eks-managed-node-group/versions.tf | 4 +-
modules/fargate-profile/README.md | 8 +-
modules/fargate-profile/variables.tf | 4 +-
modules/fargate-profile/versions.tf | 2 +-
modules/self-managed-node-group/README.md | 6 +-
modules/self-managed-node-group/main.tf | 2 +
modules/self-managed-node-group/variables.tf | 1 +
modules/self-managed-node-group/versions.tf | 2 +-
versions.tf | 4 +-
38 files changed, 1157 insertions(+), 122 deletions(-)
diff --git a/README.md b/README.md
index ec19e5329c..19531718ad 100644
--- a/README.md
+++ b/README.md
@@ -392,15 +392,15 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [tls](#requirement\_tls) | >= 2.2.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [tls](#requirement\_tls) | >= 2.2 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [tls](#provider\_tls) | >= 2.2.0 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [tls](#provider\_tls) | >= 2.2 |
## Modules
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index e430d572c0..e52e6bba2e 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -24,17 +24,17 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [random](#requirement\_random) | >= 2.0.0 |
-| [tls](#requirement\_tls) | >= 2.2.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [random](#requirement\_random) | >= 2.0 |
+| [tls](#requirement\_tls) | >= 2.2 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [random](#provider\_random) | >= 2.0.0 |
-| [tls](#provider\_tls) | >= 2.2.0 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [random](#provider\_random) | >= 2.0 |
+| [tls](#provider\_tls) | >= 2.2 |
## Modules
@@ -61,6 +61,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/bottlerocket/outputs.tf b/examples/bottlerocket/outputs.tf
index 440cd0f723..7a3517e42a 100644
--- a/examples/bottlerocket/outputs.tf
+++ b/examples/bottlerocket/outputs.tf
@@ -1,9 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
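Since the node group outputs surfaced above are maps of attribute maps, individual attributes can be projected with a `for` expression. A speculative sketch follows; the `iam_role_arn` attribute name is assumed here, not confirmed by this patch:

```hcl
# Hypothetical projection over the map-of-maps output; attribute name assumed.
output "self_managed_node_group_role_arns" {
  description = "IAM role ARNs keyed by node group name"
  value       = { for k, v in module.eks.self_managed_node_groups : k => try(v.iam_role_arn, null) }
}
```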
diff --git a/examples/bottlerocket/versions.tf b/examples/bottlerocket/versions.tf
index 4e1b818eed..ac7c0c71b1 100644
--- a/examples/bottlerocket/versions.tf
+++ b/examples/bottlerocket/versions.tf
@@ -4,15 +4,15 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
random = {
source = "hashicorp/random"
- version = ">= 2.0.0"
+ version = ">= 2.0"
}
tls = {
source = "hashicorp/tls"
- version = ">= 2.2.0"
+ version = ">= 2.2"
}
}
}
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 003faa07dc..39ba6d179e 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -24,20 +24,22 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
+| [aws](#provider\_aws) | >= 3.64 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
-| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate-profile | n/a |
+| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
+| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a |
+| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
| [eks](#module\_eks) | ../.. | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
@@ -56,6 +58,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 9598e3ab9c..b5840cf6d2 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -4,7 +4,7 @@ provider "aws" {
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_version = "1.20"
+ cluster_version = "1.21"
region = "eu-west-1"
tags = {
@@ -36,7 +36,7 @@ module "eks" {
self_managed_node_groups = {
one = {
name = "spot-1"
- override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
+ override_instance_types = ["m5.large", "m5d.large", "m6i.large"]
spot_instance_pools = 4
asg_max_size = 5
asg_desired_capacity = 5
@@ -124,30 +124,22 @@ module "disabled_eks" {
create = false
}
-module "disabled_fargate" {
+module "disabled_fargate_profile" {
source = "../../modules/fargate-profile"
create = false
}
-################################################################################
-# Additional security groups for workers
-################################################################################
+module "disabled_eks_managed_node_group" {
+ source = "../../modules/eks-managed-node-group"
-resource "aws_security_group" "additional" {
- name_prefix = "all_worker_management"
- vpc_id = module.vpc.vpc_id
+ create = false
+}
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = [
- "10.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- ]
- }
+module "disabled_self_managed_node_group" {
+ source = "../../modules/self-managed-node-group"
+
+ create = false
}
################################################################################
@@ -181,3 +173,19 @@ module "vpc" {
tags = local.tags
}
+
+resource "aws_security_group" "additional" {
+ name_prefix = "${local.name}-additional"
+ vpc_id = module.vpc.vpc_id
+
+ ingress {
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = [
+ "10.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ ]
+ }
+}
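With the security group relocated below the VPC module and renamed with the `${local.name}` prefix, one plausible (unconfirmed) way to consume it is via the `vpc_security_group_ids` argument that node_groups.tf threads through to the launch templates:

```hcl
# Hypothetical wiring only; cluster arguments elided for brevity.
module "eks" {
  source = "../.."

  # ...

  self_managed_node_group_defaults = {
    vpc_security_group_ids = [aws_security_group.additional.id]
  }
}
```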
diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf
index 440cd0f723..7a3517e42a 100644
--- a/examples/complete/outputs.tf
+++ b/examples/complete/outputs.tf
@@ -1,9 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf
index 97955e9bc8..bfce6ae345 100644
--- a/examples/complete/versions.tf
+++ b/examples/complete/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
}
}
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index 3a55a24c04..9da6466eac 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -25,14 +25,14 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
+| [aws](#provider\_aws) | >= 3.64 |

## Modules
@@ -56,6 +56,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf
index 440cd0f723..7a3517e42a 100644
--- a/examples/eks_managed_node_group/outputs.tf
+++ b/examples/eks_managed_node_group/outputs.tf
@@ -1,9 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}

+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
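
`eks_managed_node_groups` is a map of attribute maps keyed by node group name, so callers can project out individual attributes with a `for` expression. A sketch under the assumption that each entry exposes an `iam_role_arn` attribute (the exact attribute names depend on the node group submodule's outputs):

```hcl
# Illustrative projection; "iam_role_arn" is an assumed attribute name
output "node_group_iam_role_arns" {
  description = "IAM role ARN per EKS managed node group"
  value       = { for name, group in module.eks.eks_managed_node_groups : name => group.iam_role_arn }
}
```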
diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf
index e41b1ab10a..48492037e2 100644
--- a/examples/eks_managed_node_group/versions.tf
+++ b/examples/eks_managed_node_group/versions.tf
@@ -4,11 +4,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
cloudinit = {
source = "hashicorp/cloudinit"
- version = ">= 2.0.0"
+ version = ">= 2.0"
}
}
}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
index 0404ea4d36..0d79a8ac56 100644
--- a/examples/fargate/README.md
+++ b/examples/fargate/README.md
@@ -23,13 +23,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
+| [aws](#requirement\_aws) | >= 3.64 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
+| [aws](#provider\_aws) | >= 3.64 |

## Modules
@@ -53,6 +53,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf
index 1bc56942a5..7a3517e42a 100644
--- a/examples/fargate/outputs.tf
+++ b/examples/fargate/outputs.tf
@@ -1,14 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}

+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}

-# output "fargate_profile_arn" {
-# description = "Outputs from node groups"
-# value = module.eks.fargate_profile_arn
-# }
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf
index 97955e9bc8..bfce6ae345 100644
--- a/examples/fargate/versions.tf
+++ b/examples/fargate/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
}
}
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
index 131734e2f1..028966e945 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/instance_refresh/README.md
@@ -22,15 +22,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [helm](#requirement\_helm) | >= 2.0.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [helm](#requirement\_helm) | >= 2.0 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [helm](#provider\_helm) | >= 2.0.0 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [helm](#provider\_helm) | >= 2.0 |

## Modules
@@ -66,6 +66,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf
index 440cd0f723..7a3517e42a 100644
--- a/examples/instance_refresh/outputs.tf
+++ b/examples/instance_refresh/outputs.tf
@@ -1,9 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}

+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
diff --git a/examples/instance_refresh/versions.tf b/examples/instance_refresh/versions.tf
index 5c4a43112e..c7e53cecc0 100644
--- a/examples/instance_refresh/versions.tf
+++ b/examples/instance_refresh/versions.tf
@@ -4,11 +4,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
helm = {
source = "hashicorp/helm"
- version = ">= 2.0.0"
+ version = ">= 2.0"
}
}
}
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
index d2a071fcd5..9f183e4f06 100644
--- a/examples/irsa/README.md
+++ b/examples/irsa/README.md
@@ -22,15 +22,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [helm](#requirement\_helm) | >= 2.0.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [helm](#requirement\_helm) | >= 2.0 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [helm](#provider\_helm) | >= 2.0.0 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [helm](#provider\_helm) | >= 2.0 |

## Modules
@@ -47,7 +47,6 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
@@ -59,5 +58,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [aws\_account\_id](#output\_aws\_account\_id) | IAM AWS account id |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
index 9ea074d1f2..810c871201 100644
--- a/examples/irsa/irsa.tf
+++ b/examples/irsa/irsa.tf
@@ -1,5 +1,3 @@
-data "aws_caller_identity" "current" {}
-
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
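
Dropping `aws_caller_identity` works because the IRSA wiring only needs the module's `oidc_provider_arn` and `cluster_oidc_issuer_url` outputs. A minimal sketch of a trust policy built from them (the role name and the `system:serviceaccount:...` subject are placeholders, not taken from this example):

```hcl
data "aws_iam_policy_document" "irsa_assume" {
  statement {
    actions = ["sts:AssumeRoleWithWebIdentity"]

    principals {
      type        = "Federated"
      identifiers = [module.eks.oidc_provider_arn]
    }

    condition {
      test     = "StringEquals"
      # The condition key is the issuer URL without its https:// scheme
      variable = "${trimprefix(module.eks.cluster_oidc_issuer_url, "https://")}:sub"
      values   = ["system:serviceaccount:kube-system:cluster-autoscaler"]
    }
  }
}

resource "aws_iam_role" "irsa_example" {
  name               = "irsa-example" # placeholder
  assume_role_policy = data.aws_iam_policy_document.irsa_assume.json
}
```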
diff --git a/examples/irsa/outputs.tf b/examples/irsa/outputs.tf
index 796e8ee3d4..7a3517e42a 100644
--- a/examples/irsa/outputs.tf
+++ b/examples/irsa/outputs.tf
@@ -1,4 +1,121 @@
-output "aws_account_id" {
- description = "IAM AWS account id"
- value = data.aws_caller_identity.current.account_id
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+ description = "Endpoint for your Kubernetes API server"
+ value = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
+output "cluster_security_group_id" {
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+ value = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
}
diff --git a/examples/irsa/versions.tf b/examples/irsa/versions.tf
index 5c4a43112e..c7e53cecc0 100644
--- a/examples/irsa/versions.tf
+++ b/examples/irsa/versions.tf
@@ -4,11 +4,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
helm = {
source = "hashicorp/helm"
- version = ">= 2.0.0"
+ version = ">= 2.0"
}
}
}
diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md
index 2b5d070709..c0b2d84917 100644
--- a/examples/secrets_encryption/README.md
+++ b/examples/secrets_encryption/README.md
@@ -22,13 +22,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
+| [aws](#requirement\_aws) | >= 3.64 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
+| [aws](#provider\_aws) | >= 3.64 |

## Modules
@@ -52,6 +52,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf
index 440cd0f723..7a3517e42a 100644
--- a/examples/secrets_encryption/outputs.tf
+++ b/examples/secrets_encryption/outputs.tf
@@ -1,9 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}

+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf
index 97955e9bc8..bfce6ae345 100644
--- a/examples/secrets_encryption/versions.tf
+++ b/examples/secrets_encryption/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
}
}
diff --git a/examples/self_managed_node_groups/README.md b/examples/self_managed_node_groups/README.md
index 21017e5e34..da79f31e6f 100644
--- a/examples/self_managed_node_groups/README.md
+++ b/examples/self_managed_node_groups/README.md
@@ -25,13 +25,13 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
+| [aws](#requirement\_aws) | >= 3.64 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
+| [aws](#provider\_aws) | >= 3.64 |

## Modules
@@ -54,6 +54,22 @@ No inputs.
| Name | Description |
|------|-------------|
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | ARN of CloudWatch log group created |
+| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of CloudWatch log group created |
+| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
+| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
+| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
diff --git a/examples/self_managed_node_groups/outputs.tf b/examples/self_managed_node_groups/outputs.tf
index 440cd0f723..7a3517e42a 100644
--- a/examples/self_managed_node_groups/outputs.tf
+++ b/examples/self_managed_node_groups/outputs.tf
@@ -1,9 +1,121 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+ description = "The Amazon Resource Name (ARN) of the cluster"
+ value = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+ description = "Base64 encoded certificate data required to communicate with the cluster"
+ value = module.eks.cluster_certificate_authority_data
+}
+
output "cluster_endpoint" {
- description = "Endpoint for EKS control plane."
+ description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}

+output "cluster_id" {
+ description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
+ value = module.eks.cluster_id
+}
+
+output "cluster_oidc_issuer_url" {
+ description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+ value = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+ description = "Platform version for the cluster"
+ value = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+ description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+ value = module.eks.cluster_status
+}
+
output "cluster_security_group_id" {
- description = "Security group ids attached to the cluster control plane."
+ description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_security_group_id
}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the cluster security group"
+ value = module.eks.cluster_security_group_arn
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider_arn" {
+ description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+ value = module.eks.oidc_provider_arn
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+ description = "IAM role name of the EKS cluster"
+ value = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+ description = "IAM role ARN of the EKS cluster"
+ value = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+ description = "Stable and unique string identifying the IAM role"
+ value = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+ description = "Name of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+ description = "Arn of cloudwatch log group created"
+ value = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+ description = "Map of attribute maps for all EKS Fargate Profiles created"
+ value = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+ description = "Map of attribute maps for all EKS managed node groups created"
+ value = module.eks.eks_managed_node_groups
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+ description = "Map of attribute maps for all self managed node groups created"
+ value = module.eks.self_managed_node_groups
+}
diff --git a/examples/self_managed_node_groups/versions.tf b/examples/self_managed_node_groups/versions.tf
index 97955e9bc8..bfce6ae345 100644
--- a/examples/self_managed_node_groups/versions.tf
+++ b/examples/self_managed_node_groups/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
}
}
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 2960d9b692..87ed7a9106 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -52,15 +52,15 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0.0 |
+| [aws](#requirement\_aws) | >= 3.64 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0.0 |
+| [aws](#provider\_aws) | >= 3.64 |
+| [cloudinit](#provider\_cloudinit) | >= 2.0 |

## Modules
@@ -143,7 +143,7 @@ No modules.
| [max\_size](#input\_max\_size) | Maximum number of worker nodes | `number` | `3` | no |
| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | `null` | no |
| [min\_size](#input\_min\_size) | Minimum number of worker nodes | `number` | `0` | no |
-| [name](#input\_name) | Name of the EKS Node Group | `string` | `null` | no |
+| [name](#input\_name) | Name of the EKS Node Group | `string` | `""` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 9e0fbe1481..04e3dc9669 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -277,7 +277,7 @@ variable "desired_size" {
variable "name" {
description = "Name of the EKS Node Group"
type = string
- default = null
+ default = ""
}
variable "use_name_prefix" {
diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf
index e41b1ab10a..48492037e2 100644
--- a/modules/eks-managed-node-group/versions.tf
+++ b/modules/eks-managed-node-group/versions.tf
@@ -4,11 +4,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
cloudinit = {
source = "hashicorp/cloudinit"
- version = ">= 2.0.0"
+ version = ">= 2.0"
}
}
}
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index 44b0b9e551..43aef7e7cf 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -26,13 +26,13 @@ $ terraform apply
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56.0 |
+| [aws](#requirement\_aws) | >= 3.64 |

## Providers

| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56.0 |
+| [aws](#provider\_aws) | >= 3.64 |

## Modules
@@ -55,10 +55,10 @@ No modules.
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no |
| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Controls if the IAM Role that provides permissions for the EKS Fargate Profile will be created | `bool` | `true` | no |
-| [fargate\_profile\_name](#input\_fargate\_profile\_name) | Name of the EKS Fargate Profile | `string` | `null` | no |
+| [fargate\_profile\_name](#input\_fargate\_profile\_name) | Name of the EKS Fargate Profile | `string` | `""` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of an existing IAM role that provides permissions for the Fargate pod executions | `string` | `null` | no |
-| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `""` | no |
| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf
index 13557e0ca9..5168b08a7e 100644
--- a/modules/fargate-profile/variables.tf
+++ b/modules/fargate-profile/variables.tf
@@ -29,7 +29,7 @@ variable "iam_role_arn" {
variable "iam_role_name" {
description = "Name to use on IAM role created"
type = string
- default = null
+ default = ""
}
variable "iam_role_use_name_prefix" {
@@ -75,7 +75,7 @@ variable "cluster_name" {
variable "fargate_profile_name" {
description = "Name of the EKS Fargate Profile"
type = string
- default = null
+ default = ""
}
variable "subnet_ids" {
diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf
index 97955e9bc8..bfce6ae345 100644
--- a/modules/fargate-profile/versions.tf
+++ b/modules/fargate-profile/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
}
}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index e40250ea94..7e569e4fcc 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -18,13 +18,13 @@ $ terraform apply
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.56 |
+| [aws](#requirement\_aws) | >= 3.64 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
+| [aws](#provider\_aws) | >= 3.64 |
## Modules
@@ -113,7 +113,7 @@ No modules.
| [min\_elb\_capacity](#input\_min\_elb\_capacity) | Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. Updates will not wait on ELB instance number changes | `number` | `null` | no |
| [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `null` | no |
| [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Configuration block containing settings to define launch targets for Auto Scaling groups | `any` | `null` | no |
-| [name](#input\_name) | Name used across the resources created | `string` | n/a | yes |
+| [name](#input\_name) | Name used across the resources created | `string` | `""` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
| [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 6d8ad17e16..e5666ba9f3 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -281,6 +281,8 @@ resource "aws_autoscaling_group" "this" {
content {
instance_warmup = lookup(preferences.value, "instance_warmup", null)
min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
+ checkpoint_delay = lookup(preferences.value, "checkpoint_delay", null)
+ checkpoint_percentages = lookup(preferences.value, "checkpoint_percentages", null)
}
}
}
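The two new keys pass straight through to the autoscaling group's `instance_refresh` preferences. A hedged sketch of the map shape a caller might supply (key names follow the lookups above; the surrounding `strategy` key and all values are illustrative):

```hcl
instance_refresh = {
  strategy = "Rolling"
  preferences = {
    min_healthy_percentage = 66
    instance_warmup        = 300
    # Pause after 25%, 50%, and 100% of instances have been replaced
    checkpoint_percentages = [25, 50, 100]
    # Seconds to wait at each checkpoint before continuing
    checkpoint_delay       = 600
  }
}
```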
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 9599d28627..5260261693 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -11,6 +11,7 @@ variable "create" {
variable "name" {
description = "Name used across the resources created"
type = string
+ default = ""
}
variable "use_name_prefix" {
diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf
index 9480a77da8..bfce6ae345 100644
--- a/modules/self-managed-node-group/versions.tf
+++ b/modules/self-managed-node-group/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56"
+ version = ">= 3.64"
}
}
}
diff --git a/versions.tf b/versions.tf
index 83a000f86f..1969855a84 100644
--- a/versions.tf
+++ b/versions.tf
@@ -4,11 +4,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 3.56.0"
+ version = ">= 3.64"
}
tls = {
source = "hashicorp/tls"
- version = ">= 2.2.0"
+ version = ">= 2.2"
}
}
}
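Dropping the trailing `.0` in these constraints is purely cosmetic; Terraform pads missing version segments with zero, so the two spellings are the same constraint:

```hcl
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      # ">= 3.64" and ">= 3.64.0" are identical: any release at or above
      # 3.64.0 (including 4.x, since there is no upper bound) satisfies it
      version = ">= 3.64"
    }
  }
}
```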
From 22a4e7738a1c6f6b113db90501b1a3024f8856b7 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sat, 13 Nov 2021 14:30:30 -0500
Subject: [PATCH 28/83] chore: updates from testing and validation
---
README.md | 11 +-
examples/complete/README.md | 1 -
examples/complete/main.tf | 105 ++++++++++----------
examples/complete/outputs.tf | 1 +
main.tf | 8 +-
modules/eks-managed-node-group/README.md | 6 +-
modules/eks-managed-node-group/main.tf | 39 +++++---
modules/eks-managed-node-group/variables.tf | 2 +-
modules/fargate-profile/README.md | 2 +-
modules/fargate-profile/main.tf | 4 +-
modules/fargate-profile/variables.tf | 4 +-
modules/self-managed-node-group/README.md | 4 +-
modules/self-managed-node-group/main.tf | 27 +++--
node_groups.tf | 37 +++++--
outputs.tf | 1 +
variables.tf | 24 +++--
16 files changed, 164 insertions(+), 112 deletions(-)
diff --git a/README.md b/README.md
index 19531718ad..35e09e6353 100644
--- a/README.md
+++ b/README.md
@@ -432,10 +432,12 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
+| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
+| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster\_endpoint\_private\_access\_sg' should be provided | `bool` | `false` | no |
| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
-| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
-| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | `list(object({ provider_key_arn = string, resources = list(string) }))` | `[]` | no |
+| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `["audit", "api", "authenticator"]` | no |
+| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(object({ provider_key_arn = string, resources = list(string) }))` | `[]` | no |
| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
@@ -447,8 +449,6 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cluster\_iam\_role\_permissions\_boundary](#input\_cluster\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
| [cluster\_iam\_role\_tags](#input\_cluster\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
| [cluster\_iam\_role\_use\_name\_prefix](#input\_cluster\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
-| [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
@@ -459,6 +459,7 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
@@ -471,7 +472,7 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
-| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and workers will be provisioned | `string` | `null` | no |
+| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and its nodes will be provisioned | `string` | `null` | no |
## Outputs
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 39ba6d179e..c3bbde1c20 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -48,7 +48,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
## Inputs
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index b5840cf6d2..13f7a418bd 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -30,8 +30,10 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- # TODO
- # vpc_security_group_ids = [aws_security_group.additional.id]
+ # Self Managed Node Group(s)
+ self_managed_node_group_defaults = {
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ }
self_managed_node_groups = {
one = {
@@ -42,35 +44,30 @@ module "eks" {
asg_desired_capacity = 5
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
public_ip = true
-
- vpc_security_group_ids = [aws_security_group.additional.id] # TODO
}
}
- # # Managed Node Groups
- # node_groups_defaults = {
- # ami_type = "AL2_x86_64"
- # disk_size = 50
- # }
+ # EKS Managed Node Group(s)
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ # disk_size = 50
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ create_launch_template = true
+ }
eks_managed_node_groups = {
- example = {
- desired_capacity = 1
- max_capacity = 10
- min_capacity = 1
-
- vpc_security_group_ids = [aws_security_group.additional.id] # TODO
+ eks_mng = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
instance_types = ["t3.large"]
capacity_type = "SPOT"
- k8s_labels = {
+ labels = {
Environment = "test"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
- additional_tags = {
- ExtraTag = "example"
- }
taints = {
dedicated = {
key = "dedicated"
@@ -81,35 +78,38 @@ module "eks" {
update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
+ tags = {
+ ExtraTag = "example"
+ }
}
}
- # # Fargate
- # fargate_profiles = {
- # default = {
- # name = "default"
- # selectors = [
- # {
- # namespace = "kube-system"
- # labels = {
- # k8s-app = "kube-dns"
- # }
- # },
- # {
- # namespace = "default"
- # }
- # ]
-
- # tags = {
- # Owner = "test"
- # }
-
- # timeouts = {
- # create = "20m"
- # delete = "20m"
- # }
- # }
- # }
+ # Fargate Profile(s)
+ fargate_profiles = {
+ default = {
+ fargate_profile_name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
tags = local.tags
}
@@ -146,17 +146,18 @@ module "disabled_self_managed_node_group" {
# Supporting resources
################################################################################
-data "aws_availability_zones" "available" {}
-
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
- name = local.name
- cidr = "10.0.0.0/16"
- azs = data.aws_availability_zones.available.names
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+ name = local.name
+ cidr = "10.0.0.0/16"
+
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+ intra_subnets = ["10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24"]
+
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf
index 7a3517e42a..bf2bba45dc 100644
--- a/examples/complete/outputs.tf
+++ b/examples/complete/outputs.tf
@@ -109,6 +109,7 @@ output "fargate_profiles" {
output "eks_managed_node_groups" {
description = "Map of attribute maps for all EKS managed node groups created"
value = module.eks.eks_managed_node_groups
+ sensitive = true
}
################################################################################
diff --git a/main.tf b/main.tf
index 04e3b79d27..297fc8dc0f 100644
--- a/main.tf
+++ b/main.tf
@@ -13,7 +13,7 @@ resource "aws_eks_cluster" "this" {
enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
- security_group_ids = var.create_cluster_security_group ? aws_security_group.this[0].id : var.cluster_security_group_id
+ security_group_ids = var.create_cluster_security_group ? [aws_security_group.this[0].id] : [var.cluster_security_group_id]
subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
@@ -54,11 +54,11 @@ resource "aws_eks_cluster" "this" {
}
resource "aws_cloudwatch_log_group" "this" {
- count = var.create && length(var.cluster_enabled_log_types) > 0 ? 1 : 0
+ count = var.create && var.create_cloudwatch_log_group ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
- retention_in_days = var.cluster_log_retention_in_days
- kms_key_id = var.cluster_log_kms_key_id
+ retention_in_days = var.cloudwatch_log_group_retention_in_days
+ kms_key_id = var.cloudwatch_log_group_kms_key_id
tags = var.tags
}
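With the `count` now keyed off `create_cloudwatch_log_group` instead of the log types list, management of the log group is decoupled from which log types are enabled. A minimal sketch of the renamed inputs from a caller's side (module source and KMS ARN are placeholders):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # illustrative

  cluster_name = "example-cluster"

  # Manage the log group in this module, independent of cluster_enabled_log_types
  create_cloudwatch_log_group            = true
  cloudwatch_log_group_retention_in_days = 30
  cloudwatch_log_group_kms_key_id        = "arn:aws:kms:us-east-1:111122223333:key/placeholder"
}
```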
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 87ed7a9106..c344e286df 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -75,13 +75,13 @@ No modules.
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.all_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_coredns_tcp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_coredns_udp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_ephemeral_ports_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_https_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_egress_all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.self_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [cloudinit_config.eks_optimized_ami_user_data](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
@@ -150,7 +150,7 @@ No modules.
| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `{}` | no |
-| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
+| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS managed node group security group"` | no |
| [security\_group\_egress\_cidr\_blocks](#input\_security\_group\_egress\_cidr\_blocks) | List of CIDR blocks that are permitted for security group egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 8ba6b55881..b369aa4db7 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -56,13 +56,14 @@ data "cloudinit_config" "eks_optimized_ami_user_data" {
locals {
use_custom_launch_template = var.create_launch_template || var.launch_template_name != null
+ launch_template_name = coalesce(var.launch_template_name, "${var.name}-eks-node-group")
}
resource "aws_launch_template" "this" {
count = var.create && var.create_launch_template ? 1 : 0
- name = var.launch_template_use_name_prefix ? null : var.launch_template_name
- name_prefix = var.launch_template_use_name_prefix ? "${var.launch_template_name}-" : null
+ name = var.launch_template_use_name_prefix ? null : local.launch_template_name
+ name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null
description = coalesce(var.description, "Custom launch template for ${var.name} EKS managed node group")
ebs_optimized = var.ebs_optimized
@@ -255,6 +256,19 @@ resource "aws_launch_template" "this" {
create_before_destroy = true
}
+ # Ensure the security group rules and IAM policy attachments are created
+ # before (and destroyed after) the launch template, so pods that need these
+ # permissions during node create/destroy are not left without them
+ depends_on = [
+ aws_security_group_rule.cluster_https_ingress,
+ aws_security_group_rule.cluster_kubelet_ingress,
+ aws_security_group_rule.cluster_coredns_tcp_ingress,
+ aws_security_group_rule.cluster_coredns_udp_ingress,
+ aws_security_group_rule.cluster_ephemeral_ports_ingress,
+ aws_security_group_rule.self_ingress,
+ aws_security_group_rule.all_egress,
+ aws_iam_role_policy_attachment.this,
+ ]
+
tags = var.tags
}
@@ -262,12 +276,6 @@ resource "aws_launch_template" "this" {
# Node Group
################################################################################
-locals {
- launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name)
- # Change order to allow users to set version priority before using defaults
- launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
-}
-
resource "aws_eks_node_group" "this" {
count = var.create ? 1 : 0
@@ -289,7 +297,7 @@ resource "aws_eks_node_group" "this" {
ami_type = var.ami_type
release_version = var.ami_release_version
capacity_type = var.capacity_type
- disk_size = var.disk_size
+ disk_size = local.use_custom_launch_template ? null : var.disk_size # when a launch template is used, set the disk size on the template; otherwise the EKS API errors here
force_update_version = var.force_update_version
instance_types = var.instance_types
labels = var.labels
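The `disk_size` argument is only accepted by the EKS API when no launch template is attached, hence nulling it out above. A hedged sketch of sizing the root volume via the launch template instead (the `block_device_mappings` shape here is an assumption for illustration, not taken from this diff):

```hcl
eks_managed_node_groups = {
  example = {
    create_launch_template = true
    # disk_size is forced to null when a launch template is used;
    # size the root volume on the template instead
    block_device_mappings = [
      {
        device_name = "/dev/xvda"
        ebs = {
          volume_size = 50
          volume_type = "gp3"
        }
      }
    ]
  }
}
```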
@@ -298,8 +306,9 @@ resource "aws_eks_node_group" "this" {
dynamic "launch_template" {
for_each = local.use_custom_launch_template ? [1] : []
content {
- name = local.launch_template_name
- version = local.launch_template_version
+ name = try(aws_launch_template.this[0].name, var.launch_template_name)
+ # Change order to allow users to set version priority before using defaults
+ version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
}
}
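The `coalesce`/`try` pair gives an explicit user value priority, then falls back to the created template's default version, then to the literal `"$Default"`. For example (illustrative):

```hcl
# Resolution order for the node group's launch template version:
#   1. var.launch_template_version, if set
#   2. the default_version of the template this module created
#   3. the literal "$Default"
launch_template_version = "$Latest" # e.g. always track the newest template version
```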
@@ -358,7 +367,7 @@ resource "aws_eks_node_group" "this" {
################################################################################
locals {
- security_group_name = coalesce(var.security_group_name, "${var.name}-worker")
+ security_group_name = coalesce(var.security_group_name, "${var.name}-eks-node-group")
create_security_group = var.create && var.create_security_group
}
@@ -442,7 +451,7 @@ resource "aws_security_group_rule" "cluster_ephemeral_ports_ingress" {
}
# TODO - move to a separate security group in the root module that all node groups will be assigned
-resource "aws_security_group_rule" "ingress_self" {
+resource "aws_security_group_rule" "self_ingress" {
count = local.create_security_group ? 1 : 0
description = "Allow node to communicate with each other"
@@ -455,7 +464,7 @@ resource "aws_security_group_rule" "ingress_self" {
}
# Egress
-resource "aws_security_group_rule" "worker_egress_all" {
+resource "aws_security_group_rule" "all_egress" {
count = local.create_security_group ? 1 : 0
description = "Allow egress to all ports/protocols"
@@ -472,7 +481,7 @@ resource "aws_security_group_rule" "worker_egress_all" {
################################################################################
locals {
- iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+ iam_role_name = coalesce(var.iam_role_name, "${var.name}-eks-node-group")
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 04e3dc9669..8a8a63068d 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -389,7 +389,7 @@ variable "security_group_use_name_prefix" {
variable "security_group_description" {
description = "Description for the security group"
type = string
- default = "EKS worker security group"
+ default = "EKS managed node group security group"
}
variable "vpc_id" {
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index 43aef7e7cf..423780e1b7 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -63,7 +63,7 @@ No modules.
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [selectors](#input\_selectors) | Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile | `map(string)` | `{}` | no |
+| [selectors](#input\_selectors) | Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile | `any` | `[]` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate Profile | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
| [timeouts](#input\_timeouts) | Create and delete timeout configurations for the Fargate Profile | `map(string)` | `{}` | no |
diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf
index dfe5b4f34c..a1abba341b 100644
--- a/modules/fargate-profile/main.tf
+++ b/modules/fargate-profile/main.tf
@@ -1,7 +1,7 @@
data "aws_partition" "current" {}
locals {
- iam_role_name = coalesce(var.iam_role_name, var.fargate_profile_name)
+ iam_role_name = coalesce(var.iam_role_name, var.fargate_profile_name, "fargate-profile")
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
@@ -51,7 +51,7 @@ resource "aws_iam_role_policy_attachment" "this" {
################################################################################
resource "aws_eks_fargate_profile" "this" {
- for_each = var.create ? 1 : 0
+ count = var.create ? 1 : 0
cluster_name = var.cluster_name
fargate_profile_name = var.fargate_profile_name
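This is a straight bug fix: `for_each` requires a map or set of strings, so the numeric toggle is only valid with `count`. A self-contained illustration of the two meta-arguments:

```hcl
variable "create" {
  type    = bool
  default = true
}

# `count` takes a number, so a boolean toggle works
resource "null_resource" "with_count" {
  count = var.create ? 1 : 0
}

# `for_each` takes a map (or set of strings); a number like `1 : 0` is a type error
resource "null_resource" "with_for_each" {
  for_each = var.create ? { enabled = true } : {}
}
```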
diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf
index 5168b08a7e..de185adcc4 100644
--- a/modules/fargate-profile/variables.tf
+++ b/modules/fargate-profile/variables.tf
@@ -86,8 +86,8 @@ variable "subnet_ids" {
variable "selectors" {
description = "Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile"
- type = map(string)
- default = {}
+ type = any
+ default = []
}
variable "timeouts" {
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 7e569e4fcc..5453b7dc99 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -41,13 +41,13 @@ No modules.
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.all_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_coredns_tcp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_coredns_udp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_ephemeral_ports_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_https_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.worker_egress_all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.self_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index e5666ba9f3..7b48e3c5d6 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -119,7 +119,7 @@ resource "aws_launch_template" "this" {
}
iam_instance_profile {
- arn = var.create_iam_instance_profile ? aws_iam_role.this[0].arn : var.iam_instance_profile_arn
+ arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn
}
dynamic "instance_market_options" {
@@ -207,6 +207,19 @@ resource "aws_launch_template" "this" {
create_before_destroy = true
}
+ # Ensure the security group rules and IAM policy attachments are created
+ # before (and destroyed after) the launch template, so pods that need these
+ # permissions during node create/destroy are not left without them
+ depends_on = [
+ aws_security_group_rule.cluster_https_ingress,
+ aws_security_group_rule.cluster_kubelet_ingress,
+ aws_security_group_rule.cluster_coredns_tcp_ingress,
+ aws_security_group_rule.cluster_coredns_udp_ingress,
+ aws_security_group_rule.cluster_ephemeral_ports_ingress,
+ aws_security_group_rule.self_ingress,
+ aws_security_group_rule.all_egress,
+ aws_iam_role_policy_attachment.this,
+ ]
+
tags = var.tags
}
@@ -215,8 +228,8 @@ resource "aws_launch_template" "this" {
################################################################################
locals {
- launch_template_name = var.create_launch_template ? aws_launch_template.this[0].name : var.launch_template_name
- launch_template_version = var.create_launch_template && var.launch_template_version == null ? aws_launch_template.this[0].latest_version : var.launch_template_version
+ launch_template_name = var.create && var.create_launch_template ? aws_launch_template.this[0].name : var.launch_template_name
+ launch_template_version = var.create && var.create_launch_template && var.launch_template_version == null ? aws_launch_template.this[0].latest_version : var.launch_template_version
}
resource "aws_autoscaling_group" "this" {
@@ -400,7 +413,7 @@ resource "aws_autoscaling_schedule" "this" {
################################################################################
locals {
- security_group_name = coalesce(var.security_group_name, "${var.name}-worker")
+ security_group_name = coalesce(var.security_group_name, "${var.name}-node-group")
create_security_group = var.create && var.create_security_group
}
@@ -484,7 +497,7 @@ resource "aws_security_group_rule" "cluster_ephemeral_ports_ingress" {
}
# TODO - move to a separate security group in the root module that all node groups will be assigned
-resource "aws_security_group_rule" "ingress_self" {
+resource "aws_security_group_rule" "self_ingress" {
count = local.create_security_group ? 1 : 0
description = "Allow node to communicate with each other"
@@ -497,7 +510,7 @@ resource "aws_security_group_rule" "ingress_self" {
}
# Egress
-resource "aws_security_group_rule" "worker_egress_all" {
+resource "aws_security_group_rule" "all_egress" {
count = local.create_security_group ? 1 : 0
description = "Allow egress to all ports/protocols"
@@ -514,7 +527,7 @@ resource "aws_security_group_rule" "worker_egress_all" {
################################################################################
locals {
- iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-worker")
+ iam_role_name = coalesce(var.iam_role_name, "${var.name}-node-group")
iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
diff --git a/node_groups.tf b/node_groups.tf
index ad8de2a5e6..a2825ad5e1 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -11,7 +11,7 @@ module "fargate_profile" {
cluster_name = aws_eks_cluster.this[0].name
fargate_profile_name = try(each.value.fargate_profile_name, each.key)
subnet_ids = try(each.value.subnet_ids, var.fargate_profile_defaults.subnet_ids, var.subnet_ids)
- selectors = try(each.value.selectors, var.fargate_profile_defaults.selectors, {})
+ selectors = try(each.value.selectors, var.fargate_profile_defaults.selectors, [])
timeouts = try(each.value.timeouts, var.fargate_profile_defaults.timeouts, {})
# IAM role
@@ -36,7 +36,9 @@ module "eks_managed_node_group" {
for_each = var.create ? var.eks_managed_node_groups : {}
- cluster_name = aws_eks_cluster.this[0].name
+ cluster_name = aws_eks_cluster.this[0].name
+ cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, var.cluster_version)
+ cluster_security_group_id = var.create_cluster_security_group ? aws_security_group.this[0].id : var.cluster_security_group_id
# EKS Managed Node Group
name = try(each.value.name, each.key)
@@ -57,7 +59,6 @@ module "eks_managed_node_group" {
force_update_version = try(each.value.force_update_version, var.eks_managed_node_group_defaults.force_update_version, null)
instance_types = try(each.value.instance_types, var.eks_managed_node_group_defaults.instance_types, null)
labels = try(each.value.labels, var.eks_managed_node_group_defaults.labels, null)
- cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, var.cluster_version)
remote_access = try(each.value.remote_access, var.eks_managed_node_group_defaults.remote_access, {})
taints = try(each.value.taints, var.eks_managed_node_group_defaults.taints, {})
@@ -81,11 +82,9 @@ module "eks_managed_node_group" {
launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null)
description = try(each.value.description, var.eks_managed_node_group_defaults.description, null)
- ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null)
- key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
-
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])
-
+ ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null)
+ key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])
default_version = try(each.value.default_version, var.eks_managed_node_group_defaults.default_version, null)
update_default_version = try(each.value.update_default_version, var.eks_managed_node_group_defaults.update_default_version, null)
disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null)
@@ -118,6 +117,15 @@ module "eks_managed_node_group" {
iam_role_tags = try(each.value.iam_role_tags, var.eks_managed_node_group_defaults.iam_role_tags, {})
iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, [])
+ # Security group
+ create_security_group = try(each.value.create_security_group, var.eks_managed_node_group_defaults.create_security_group, true)
+ security_group_name = try(each.value.security_group_name, var.eks_managed_node_group_defaults.security_group_name, null)
+ security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.eks_managed_node_group_defaults.security_group_use_name_prefix, true)
+ security_group_description = try(each.value.security_group_description, var.eks_managed_node_group_defaults.security_group_description, "EKS managed node group security group")
+ vpc_id = try(each.value.vpc_id, var.eks_managed_node_group_defaults.vpc_id, var.vpc_id)
+ security_group_egress_cidr_blocks = try(each.value.security_group_egress_cidr_blocks, var.eks_managed_node_group_defaults.security_group_egress_cidr_blocks, ["0.0.0.0/0"])
+ security_group_tags = try(each.value.security_group_tags, var.eks_managed_node_group_defaults.security_group_tags, {})
+
tags = merge(var.tags, try(each.value.tags, var.eks_managed_node_group_defaults.tags, {}))
}
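Each of these inputs resolves per node group first, then from the shared defaults map, then a hard-coded fallback, so overrides can live at either level. A hedged sketch:

```hcl
eks_managed_node_group_defaults = {
  # Applies to every node group unless a group overrides it
  security_group_egress_cidr_blocks = ["10.0.0.0/8"]
}

eks_managed_node_groups = {
  example = {
    # Overrides the shared default for this group only, e.g. to bring
    # an externally managed security group via vpc_security_group_ids
    create_security_group = false
  }
}
```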
@@ -186,8 +194,8 @@ module "self_managed_node_group" {
key_name = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null)
user_data = try(each.value.user_data, var.self_managed_node_group_defaults.user_data, null)
- vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])
-
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])
+ cluster_security_group_id = var.create_cluster_security_group ? aws_security_group.this[0].id : var.cluster_security_group_id
default_version = try(each.value.default_version, var.self_managed_node_group_defaults.default_version, null)
update_default_version = try(each.value.update_default_version, var.self_managed_node_group_defaults.update_default_version, null)
disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null)
@@ -221,6 +229,15 @@ module "self_managed_node_group" {
iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.self_managed_node_group_defaults.iam_role_attach_cni_policy, true)
iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, [])
+ # Security group
+ create_security_group = try(each.value.create_security_group, var.self_managed_node_group_defaults.create_security_group, true)
+ security_group_name = try(each.value.security_group_name, var.self_managed_node_group_defaults.security_group_name, null)
+ security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.self_managed_node_group_defaults.security_group_use_name_prefix, true)
+ security_group_description = try(each.value.security_group_description, var.self_managed_node_group_defaults.security_group_description, "Self managed node group security group")
+ vpc_id = try(each.value.vpc_id, var.self_managed_node_group_defaults.vpc_id, var.vpc_id)
+ security_group_egress_cidr_blocks = try(each.value.security_group_egress_cidr_blocks, var.self_managed_node_group_defaults.security_group_egress_cidr_blocks, ["0.0.0.0/0"])
+ security_group_tags = try(each.value.security_group_tags, var.self_managed_node_group_defaults.security_group_tags, {})
+
tags = merge(var.tags, try(each.value.tags, var.self_managed_node_group_defaults.tags, {}))
propagate_tags = try(each.value.propagate_tags, var.self_managed_node_group_defaults.propagate_tags, [])
}
diff --git a/outputs.tf b/outputs.tf
index d754efc99d..e3d5f528c1 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -109,6 +109,7 @@ output "fargate_profiles" {
output "eks_managed_node_groups" {
description = "Map of attribute maps for all EKS managed node groups created"
value = module.eks_managed_node_group
+ sensitive = true
}
################################################################################
diff --git a/variables.tf b/variables.tf
index d9d1e4a415..b8c87929d1 100644
--- a/variables.tf
+++ b/variables.tf
@@ -35,7 +35,7 @@ variable "cluster_version" {
variable "cluster_enabled_log_types" {
description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
type = list(string)
- default = []
+ default = ["audit", "api", "authenticator"]
}
variable "cluster_security_group_id" {
@@ -76,7 +76,7 @@ variable "cluster_service_ipv4_cidr" {
}
variable "cluster_encryption_config" {
- description = "Configuration block with encryption configuration for the cluster. See examples/secrets_encryption/main.tf for example format"
+ description = "Configuration block with encryption configuration for the cluster"
type = list(object({
provider_key_arn = string
resources = list(string)
@@ -96,20 +96,30 @@ variable "cluster_timeouts" {
default = {}
}
-variable "cluster_log_retention_in_days" {
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+variable "create_cloudwatch_log_group" {
+ description = "Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled"
+ type = bool
+ default = true
+}
+
+variable "cloudwatch_log_group_retention_in_days" {
description = "Number of days to retain log events. Default retention - 90 days"
type = number
default = 90
}
-variable "cluster_log_kms_key_id" {
+variable "cloudwatch_log_group_kms_key_id" {
description = "If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)"
type = string
- default = ""
+ default = null
}
################################################################################
-# Cluster Security Group
+# Security Group
################################################################################
variable "create_cluster_security_group" {
@@ -119,7 +129,7 @@ variable "create_cluster_security_group" {
}
variable "vpc_id" {
- description = "ID of the VPC where the cluster and workers will be provisioned"
+ description = "ID of the VPC where the cluster and its nodes will be provisioned"
type = string
default = null
}
From 8ddf25ac7906ce753de367a55c6d1f8e480ed5ea Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sat, 13 Nov 2021 20:04:48 -0500
Subject: [PATCH 29/83] chore: update IAM variables to be consistent
---
.github/workflows/release.yml | 2 +-
README.md | 25 ++++----
examples/complete/main.tf | 117 +++++++++++++++++-----------------
main.tf | 99 ++++++++++++++--------------
outputs.tf | 6 +-
variables.tf | 20 +++---
6 files changed, 135 insertions(+), 134 deletions(-)
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 141937d863..7a096c2e75 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,7 +7,7 @@ on:
- main
- master
paths:
- - '**/*.py'
+ - '**/*.tpl'
- '**/*.tf'
jobs:
diff --git a/README.md b/README.md
index 35e09e6353..5c4875bf02 100644
--- a/README.md
+++ b/README.md
@@ -417,14 +417,15 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
| [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.cluster_ingress_https_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_iam_policy_document.cluster_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
@@ -443,30 +444,30 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `["0.0.0.0/0"]` | no |
-| [cluster\_iam\_role\_arn](#input\_cluster\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false` | `string` | `null` | no |
-| [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
-| [cluster\_iam\_role\_path](#input\_cluster\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
-| [cluster\_iam\_role\_permissions\_boundary](#input\_cluster\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
-| [cluster\_iam\_role\_tags](#input\_cluster\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
-| [cluster\_iam\_role\_use\_name\_prefix](#input\_cluster\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
-| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `string` | `true` | no |
| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | Service IPv4 CIDR for the Kubernetes cluster | `string` | `null` | no |
| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
-| [create\_cluster\_iam\_role](#input\_create\_cluster\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
| [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no |
| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 13f7a418bd..1581583262 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -36,15 +36,15 @@ module "eks" {
}
self_managed_node_groups = {
- one = {
- name = "spot-1"
- override_instance_types = ["m5.large", "m5d.large", "m6i.large"]
- spot_instance_pools = 4
- asg_max_size = 5
- asg_desired_capacity = 5
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
- public_ip = true
- }
+ # one = {
+ # name = "spot-1"
+ # override_instance_types = ["m5.large", "m5d.large", "m6i.large"]
+ # spot_instance_pools = 4
+ # asg_max_size = 5
+ # asg_desired_capacity = 5
+ # bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+ # public_ip = true
+ # }
}
# EKS Managed Node Group(s)
@@ -56,59 +56,59 @@ module "eks" {
}
eks_managed_node_groups = {
- eks_mng = {
- min_size = 1
- max_size = 10
- desired_size = 1
-
- instance_types = ["t3.large"]
- capacity_type = "SPOT"
- labels = {
- Environment = "test"
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
- taints = {
- dedicated = {
- key = "dedicated"
- value = "gpuGroup"
- effect = "NO_SCHEDULE"
- }
- }
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
- tags = {
- ExtraTag = "example"
- }
- }
+ # eks_mng = {
+ # min_size = 1
+ # max_size = 10
+ # desired_size = 1
+
+ # instance_types = ["t3.large"]
+ # capacity_type = "SPOT"
+ # labels = {
+ # Environment = "test"
+ # GithubRepo = "terraform-aws-eks"
+ # GithubOrg = "terraform-aws-modules"
+ # }
+ # taints = {
+ # dedicated = {
+ # key = "dedicated"
+ # value = "gpuGroup"
+ # effect = "NO_SCHEDULE"
+ # }
+ # }
+ # update_config = {
+ # max_unavailable_percentage = 50 # or set `max_unavailable`
+ # }
+ # tags = {
+ # ExtraTag = "example"
+ # }
+ # }
}
# Fargate Profile(s)
fargate_profiles = {
- default = {
- fargate_profile_name = "default"
- selectors = [
- {
- namespace = "kube-system"
- labels = {
- k8s-app = "kube-dns"
- }
- },
- {
- namespace = "default"
- }
- ]
-
- tags = {
- Owner = "test"
- }
-
- timeouts = {
- create = "20m"
- delete = "20m"
- }
- }
+ # default = {
+ # fargate_profile_name = "default"
+ # selectors = [
+ # {
+ # namespace = "kube-system"
+ # labels = {
+ # k8s-app = "kube-dns"
+ # }
+ # },
+ # {
+ # namespace = "default"
+ # }
+ # ]
+
+ # tags = {
+ # Owner = "test"
+ # }
+
+ # timeouts = {
+ # create = "20m"
+ # delete = "20m"
+ # }
+ # }
}
tags = local.tags
@@ -156,7 +156,6 @@ module "vpc" {
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
- intra_subnets = ["10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
diff --git a/main.tf b/main.tf
index 297fc8dc0f..aa281a359a 100644
--- a/main.tf
+++ b/main.tf
@@ -8,7 +8,7 @@ resource "aws_eks_cluster" "this" {
count = var.create ? 1 : 0
name = var.cluster_name
- role_arn = try(aws_iam_role.cluster[0].arn, var.cluster_iam_role_arn)
+ role_arn = try(aws_iam_role.this[0].arn, var.iam_role_arn)
version = var.cluster_version
enabled_cluster_log_types = var.cluster_enabled_log_types
@@ -47,6 +47,7 @@ resource "aws_eks_cluster" "this" {
}
depends_on = [
+ aws_iam_role_policy_attachment.this,
aws_security_group_rule.cluster_egress_internet,
# aws_security_group_rule.cluster_https_worker_ingress,
aws_cloudwatch_log_group.this
@@ -103,17 +104,20 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
type = "egress"
}
-# resource "aws_security_group_rule" "cluster_https_worker_ingress" {
-# count = local.create_cluster_sg && var.create_worker_security_group ? 1 : 0
+resource "aws_security_group_rule" "cluster_ingress_https_nodes" {
+ for_each = local.create_cluster_sg ? merge(
+ { for k, v in module.self_managed_node_group : k => v.security_group_id },
+ { for k, v in module.eks_managed_node_group : k => v.security_group_id }
+ ) : {}
-# description = "Allow pods to communicate with the EKS cluster API"
-# protocol = "tcp"
-# security_group_id = aws_security_group.this[0].id
-# source_security_group_id = local.worker_security_group_id # TODO - what a circle, oy
-# from_port = 443
-# to_port = 443
-# type = "ingress"
-# }
+ description = "Allow pods to communicate with the EKS cluster API"
+ protocol = "tcp"
+ security_group_id = aws_security_group.this[0].id
+ source_security_group_id = each.value
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+}
resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
count = local.enable_cluster_private_endpoint_sg_access && length(var.cluster_endpoint_private_access_cidrs) > 0 ? 1 : 0
@@ -184,32 +188,12 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
################################################################################
locals {
- cluster_iam_role_name = coalesce(var.cluster_iam_role_name, "${var.cluster_name}-cluster")
- policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
-}
-
-resource "aws_iam_role" "cluster" {
- count = var.create && var.create_cluster_iam_role ? 1 : 0
-
- name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
- name_prefix = var.cluster_iam_role_use_name_prefix ? try("${local.cluster_iam_role_name}-", local.cluster_iam_role_name) : null
- path = var.cluster_iam_role_path
-
- assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy[0].json
- permissions_boundary = var.cluster_iam_role_permissions_boundary
- managed_policy_arns = [
- "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
- "${local.policy_arn_prefix}/AmazonEKSServicePolicy",
- "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
- aws_iam_policy.cluster_additional[0].arn,
- ]
- force_detach_policies = true
-
- tags = merge(var.tags, var.cluster_iam_role_tags)
+ iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
+ policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
-data "aws_iam_policy_document" "cluster_assume_role_policy" {
- count = var.create && var.create_cluster_iam_role ? 1 : 0
+data "aws_iam_policy_document" "assume_role_policy" {
+ count = var.create && var.create_iam_role ? 1 : 0
statement {
sid = "EKSClusterAssumeRole"
@@ -222,8 +206,27 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
}
}
-data "aws_iam_policy_document" "cluster_additional" {
- count = var.create && var.create_cluster_iam_role ? 1 : 0
+resource "aws_iam_role" "this" {
+ count = var.create && var.create_iam_role ? 1 : 0
+
+ name = var.iam_role_use_name_prefix ? null : local.iam_role_name
+ name_prefix = var.iam_role_use_name_prefix ? try("${local.iam_role_name}-", local.iam_role_name) : null
+ path = var.iam_role_path
+
+ assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
+ permissions_boundary = var.iam_role_permissions_boundary
+ force_detach_policies = true
+
+ inline_policy {
+ name = "additional-alb"
+ policy = data.aws_iam_policy_document.additional[0].json
+ }
+
+ tags = merge(var.tags, var.iam_role_tags)
+}
+
+data "aws_iam_policy_document" "additional" {
+ count = var.create && var.create_iam_role ? 1 : 0
# Permissions required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
statement {
@@ -240,21 +243,19 @@ data "aws_iam_policy_document" "cluster_additional" {
# Deny permissions to logs:CreateLogGroup; it is not needed since we create the log group ourselves in this module,
# and it is causing trouble during cleanup/deletion
statement {
- effect = "Deny"
- actions = [
- "logs:CreateLogGroup"
- ]
+ effect = "Deny"
+ actions = ["logs:CreateLogGroup"]
resources = ["*"]
}
}
-resource "aws_iam_policy" "cluster_additional" {
- count = var.create && var.create_cluster_iam_role ? 1 : 0
-
- name = var.cluster_iam_role_use_name_prefix ? null : local.cluster_iam_role_name
- name_prefix = var.cluster_iam_role_use_name_prefix ? try("${local.cluster_iam_role_name}-", local.cluster_iam_role_name) : null
- description = "Additional permissions for EKS cluster"
- policy = data.aws_iam_policy_document.cluster_additional[0].json
+# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
+resource "aws_iam_role_policy_attachment" "this" {
+ for_each = var.create && var.create_iam_role ? toset([
+ "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
+ "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
+ ]) : toset([])
- tags = merge(var.tags, var.cluster_iam_role_tags)
+ policy_arn = each.value
+ role = aws_iam_role.this[0].name
}
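
The managed policies previously embedded in `managed_policy_arns` are now attached one at a time; the `for_each`-over-`toset` pattern in isolation looks like the sketch below (role name hypothetical):

```hcl
resource "aws_iam_role_policy_attachment" "example" {
  # One resource instance per ARN, keyed by the ARN itself, so adding or
  # removing a policy only creates/destroys that single attachment
  for_each = toset([
    "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
    "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController",
  ])

  policy_arn = each.value
  role       = "my-eks-cluster-role" # hypothetical
}
```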
diff --git a/outputs.tf b/outputs.tf
index e3d5f528c1..5136c0ad30 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -66,17 +66,17 @@ output "oidc_provider_arn" {
output "cluster_iam_role_name" {
description = "IAM role name of the EKS cluster"
- value = try(aws_iam_role.cluster[0].name, "")
+ value = try(aws_iam_role.this[0].name, "")
}
output "cluster_iam_role_arn" {
description = "IAM role ARN of the EKS cluster"
- value = try(aws_iam_role.cluster[0].arn, "")
+ value = try(aws_iam_role.this[0].arn, "")
}
output "cluster_iam_role_unique_id" {
description = "Stable and unique string identifying the IAM role"
- value = try(aws_iam_role.cluster[0].unique_id, "")
+ value = try(aws_iam_role.this[0].unique_id, "")
}
################################################################################
diff --git a/variables.tf b/variables.tf
index b8c87929d1..3849aebeaf 100644
--- a/variables.tf
+++ b/variables.tf
@@ -20,8 +20,8 @@ variable "cluster_name" {
default = ""
}
-variable "cluster_iam_role_arn" {
- description = "Existing IAM role ARN for the cluster. Required if `create_cluster_iam_role` is set to `false`"
+variable "iam_role_arn" {
+ description = "Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false`"
type = string
default = null
}
@@ -141,7 +141,7 @@ variable "cluster_security_group_name" {
}
variable "cluster_security_group_use_name_prefix" {
- description = "Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix"
+ description = "Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix"
type = string
default = true
}
@@ -196,37 +196,37 @@ variable "openid_connect_audiences" {
# Cluster IAM Role
################################################################################
-variable "create_cluster_iam_role" {
+variable "create_iam_role" {
description = "Determines whether a cluster IAM role is created or to use an existing IAM role"
type = bool
default = true
}
-variable "cluster_iam_role_name" {
+variable "iam_role_name" {
description = "Name to use on cluster role created"
type = string
default = null
}
-variable "cluster_iam_role_use_name_prefix" {
- description = "Determines whether cluster IAM role name (`cluster_iam_role_name`) is used as a prefix"
+variable "iam_role_use_name_prefix" {
+ description = "Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix"
type = string
default = true
}
-variable "cluster_iam_role_path" {
+variable "iam_role_path" {
description = "Cluster IAM role path"
type = string
default = null
}
-variable "cluster_iam_role_permissions_boundary" {
+variable "iam_role_permissions_boundary" {
description = "ARN of the policy that is used to set the permissions boundary for the cluster role"
type = string
default = null
}
-variable "cluster_iam_role_tags" {
+variable "iam_role_tags" {
description = "A map of additional tags to add to the cluster IAM role created"
type = map(string)
default = {}
From 28c90b7f7bba05fcedb9211b11edac9a4ff6faa2 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sun, 14 Nov 2021 17:48:39 -0500
Subject: [PATCH 30/83] chore: update main cluster and node group security
groups
---
README.md | 33 ++++---
main.tf | 228 ++++++++++++++++++++++++++++++++-----------------
node_groups.tf | 4 +-
outputs.tf | 25 +++++-
variables.tf | 76 ++++++++++++-----
5 files changed, 251 insertions(+), 115 deletions(-)
diff --git a/README.md b/README.md
index 5c4875bf02..87c427e8ad 100644
--- a/README.md
+++ b/README.md
@@ -419,11 +419,10 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_ingress_https_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_iam_policy_document.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
@@ -435,28 +434,27 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
|------|-------------|------|---------|:--------:|
| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
-| [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster\_endpoint\_private\_access\_sg' should be provided | `bool` | `false` | no |
-| [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
+| [cluster\_additional\_security\_group\_rules](#input\_cluster\_additional\_security\_group\_rules) | Map of additional security group rules to add to the cluster security group created | `map(any)` | `{}` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `["audit", "api", "authenticator"]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | | `[]` | no |
| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
-| [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
-| [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true` | `list(string)` | `[]` | no |
| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `["0.0.0.0/0"]` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
+| [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
-| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster role created | `string` | `null` | no |
+| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
-| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `string` | `true` | no |
| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | Service IPv4 CIDR for the Kubernetes cluster | `string` | `null` | no |
| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
-| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id` | `bool` | `true` | no |
+| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
@@ -468,6 +466,12 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [node\_additional\_security\_group\_rules](#input\_node\_additional\_security\_group\_rules) | Map of additional security group rules to add to the node security group created | `map(any)` | `{}` | no |
+| [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no |
+| [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no |
+| [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no |
+| [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no |
+| [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `string` | `true` | no |
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
| [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no |
| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
@@ -490,11 +494,14 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
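
Because the node shared security group ID is now exported, rules can also be layered on from outside the module; a sketch, with the rule values purely illustrative:

```hcl
resource "aws_security_group_rule" "node_ingress_ssh" {
  description       = "SSH from internal networks (illustrative)"
  type              = "ingress"
  protocol          = "tcp"
  from_port         = 22
  to_port           = 22
  cidr_blocks       = ["10.0.0.0/8"] # hypothetical internal CIDR
  security_group_id = module.eks.node_security_group_id
}
```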
diff --git a/main.tf b/main.tf
index aa281a359a..55c6b7c1ba 100644
--- a/main.tf
+++ b/main.tf
@@ -13,7 +13,7 @@ resource "aws_eks_cluster" "this" {
enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
- security_group_ids = var.create_cluster_security_group ? [aws_security_group.this[0].id] : [var.cluster_security_group_id]
+ security_group_ids = [local.cluster_security_group_id]
subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
@@ -48,8 +48,8 @@ resource "aws_eks_cluster" "this" {
depends_on = [
aws_iam_role_policy_attachment.this,
- aws_security_group_rule.cluster_egress_internet,
- # aws_security_group_rule.cluster_https_worker_ingress,
+ aws_security_group_rule.cluster,
+ aws_security_group_rule.node,
aws_cloudwatch_log_group.this
]
}
@@ -65,98 +65,173 @@ resource "aws_cloudwatch_log_group" "this" {
}
################################################################################
-# Security Group
+# Cluster Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
################################################################################
locals {
- cluster_sg_name = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
- create_cluster_sg = var.create && var.create_cluster_security_group
- enable_cluster_private_endpoint_sg_access = local.create_cluster_sg && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access
+ cluster_sg_name = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
+ create_cluster_sg = var.create && var.create_cluster_security_group
+
+ cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id
+
+ cluster_security_group_rules = {
+ ingress_nodes_443 = {
+ description = "Node groups to cluster API"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+ source_node_security_group = true
+ }
+ egress_nodes_443 = {
+ description = "Cluster API to node groups"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "egress"
+ source_node_security_group = true
+ }
+ egress_nodes_kubelet = {
+ description = "Cluster API to node kubelets"
+ protocol = "tcp"
+ from_port = 10250
+ to_port = 10250
+ type = "egress"
+ source_node_security_group = true
+ }
+ }
}
-resource "aws_security_group" "this" {
+resource "aws_security_group" "cluster" {
count = local.create_cluster_sg ? 1 : 0
name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}-" : null
- description = "EKS cluster security group"
+ description = var.cluster_security_group_description
vpc_id = var.vpc_id
tags = merge(
var.tags,
- {
- "Name" = local.cluster_sg_name
- },
+ { "Name" = local.cluster_sg_name },
var.cluster_security_group_tags
)
}
-resource "aws_security_group_rule" "cluster_egress_internet" {
- count = local.create_cluster_sg ? 1 : 0
-
- description = "Allow cluster egress access to the Internet"
- protocol = "-1"
- security_group_id = aws_security_group.this[0].id
- cidr_blocks = var.cluster_egress_cidrs
- from_port = 0
- to_port = 0
- type = "egress"
-}
-
-resource "aws_security_group_rule" "cluster_ingress_https_nodes" {
- for_each = local.create_cluster_sg ? merge(
- { for k, v in module.self_managed_node_group : k => v.security_group_id },
- { for k, v in module.eks_managed_node_group : k => v.security_group_id }
- ) : {}
-
- description = "Allow pods to communicate with the EKS cluster API"
- protocol = "tcp"
- security_group_id = aws_security_group.this[0].id
- source_security_group_id = each.value
- from_port = 443
- to_port = 443
- type = "ingress"
+resource "aws_security_group_rule" "cluster" {
+ for_each = local.create_cluster_sg ? merge(local.cluster_security_group_rules, var.cluster_additional_security_group_rules) : {}
+
+ # Required
+ security_group_id = aws_security_group.cluster[0].id
+ protocol = each.value.protocol
+ from_port = each.value.from_port
+ to_port = each.value.to_port
+ type = each.value.type
+
+ # Optional
+ description = try(each.value.description, null)
+ cidr_blocks = try(each.value.cidr_blocks, null)
+ ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
+ prefix_list_ids = try(each.value.prefix_list_ids, [])
+ self = try(each.value.self, null)
+ source_security_group_id = try(
+ each.value.source_security_group_id,
+ try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
+ )
}
-resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
- count = local.enable_cluster_private_endpoint_sg_access && length(var.cluster_endpoint_private_access_cidrs) > 0 ? 1 : 0
- description = "Allow private K8S API ingress from custom CIDR source"
- type = "ingress"
- from_port = 443
- to_port = 443
- protocol = "tcp"
- cidr_blocks = var.cluster_endpoint_private_access_cidrs
+################################################################################
+# Node Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
+################################################################################
- security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+locals {
+ node_sg_name = coalesce(var.node_security_group_name, "${var.cluster_name}-node")
+ create_node_sg = var.create && var.create_node_security_group
+
+ node_security_group_id = local.create_node_sg ? aws_security_group.node[0].id : var.node_security_group_id
+
+ node_security_group_rules = {
+ egress_cluster_443 = {
+ description = "Node groups to cluster API"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "egress"
+ source_cluster_security_group = true
+ }
+ ingress_cluster_443 = {
+ description = "Cluster API to node groups"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "ingress"
+ source_cluster_security_group = true
+ }
+ ingress_cluster_kubelet = {
+ description = "Cluster API to node kubelets"
+ protocol = "tcp"
+ from_port = 10250
+ to_port = 10250
+ type = "ingress"
+ source_cluster_security_group = true
+ }
+ ingress_self_coredns_tcp = {
+ description = "CoreDNS"
+ protocol = "tcp"
+ from_port = 53
+ to_port = 53
+ type = "ingress"
+ self = true
+ }
+ ingress_self_coredns_udp = {
+ description = "CoreDNS"
+ protocol = "udp"
+ from_port = 53
+ to_port = 53
+ type = "ingress"
+ self = true
+ }
+ }
}
-resource "aws_security_group_rule" "cluster_private_access_sg_source" {
- for_each = local.enable_cluster_private_endpoint_sg_access ? toset(var.cluster_endpoint_private_access_sg) : toset([])
+resource "aws_security_group" "node" {
+ count = local.create_node_sg ? 1 : 0
- description = "Allow private K8S API ingress from custom Security Groups source"
- type = "ingress"
- from_port = 443
- to_port = 443
- protocol = "tcp"
- source_security_group_id = each.value
+ name = var.node_security_group_use_name_prefix ? null : local.node_sg_name
+ name_prefix = var.node_security_group_use_name_prefix ? "${local.node_sg_name}-" : null
+ description = var.node_security_group_description
+ vpc_id = var.vpc_id
- security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+ tags = merge(
+ var.tags,
+ { "Name" = local.node_sg_name },
+ var.node_security_group_tags
+ )
}
-# TODO
-# resource "aws_security_group_rule" "cluster_primary_ingress_worker" {
-# count = local.create_security_group && var.worker_create_cluster_primary_security_group_rules ? 1 : 0
-
-# description = "Allow pods running on worker to send communication to cluster primary security group (e.g. Fargate pods)."
-# protocol = "all"
-# security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
-# source_security_group_id = local.worker_security_group_id
-# from_port = 0
-# to_port = 65535
-# type = "ingress"
-# }
+resource "aws_security_group_rule" "node" {
+ for_each = local.create_node_sg ? merge(local.node_security_group_rules, var.node_additional_security_group_rules) : {}
+
+ # Required
+ security_group_id = aws_security_group.node[0].id
+ protocol = each.value.protocol
+ from_port = each.value.from_port
+ to_port = each.value.to_port
+ type = each.value.type
+
+ # Optional
+ description = try(each.value.description, null)
+ cidr_blocks = try(each.value.cidr_blocks, null)
+ ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
+ prefix_list_ids = try(each.value.prefix_list_ids, [])
+ self = try(each.value.self, null)
+ source_security_group_id = try(
+ each.value.source_security_group_id,
+ try(each.value.source_cluster_security_group, false) ? local.cluster_security_group_id : null
+ )
+}
################################################################################
# IRSA
@@ -176,9 +251,7 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
tags = merge(
- {
- Name = "${var.cluster_name}-eks-irsa"
- },
+ { Name = "${var.cluster_name}-eks-irsa" },
var.tags
)
}
@@ -210,7 +283,7 @@ resource "aws_iam_role" "this" {
count = var.create && var.create_iam_role ? 1 : 0
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
- name_prefix = var.iam_role_use_name_prefix ? try("${local.iam_role_name}-", local.iam_role_name) : null
+ name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
path = var.iam_role_path
assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
@@ -218,7 +291,7 @@ resource "aws_iam_role" "this" {
force_detach_policies = true
inline_policy {
- name = "additional-alb"
+ name = local.iam_role_name
policy = data.aws_iam_policy_document.additional[0].json
}
@@ -228,20 +301,23 @@ resource "aws_iam_role" "this" {
data "aws_iam_policy_document" "additional" {
count = var.create && var.create_iam_role ? 1 : 0
- # Permissions required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
+ # Permissions required to create AWSServiceRoleForElasticLoadBalancing
+ # service-linked role by EKS during ELB provisioning
statement {
sid = "ELBServiceLinkedRole"
effect = "Allow"
actions = [
"ec2:DescribeAccountAttributes",
+ "ec2:DescribeAddresses",
"ec2:DescribeInternetGateways",
- "ec2:DescribeAddresses"
+ "elasticloadbalancing:SetIpAddressType",
+ "elasticloadbalancing:SetSubnets"
]
resources = ["*"]
}
-  # Deny permissions to logs:CreateLogGroup; it is not needed since we create the log group ourselves in this module,
-  # and it is causing trouble during cleanup/deletion
+  # Deny permissions to logs:CreateLogGroup since it's created through Terraform
+  # in this module, and it causes issues during cleanup/deletion
statement {
effect = "Deny"
actions = ["logs:CreateLogGroup"]
diff --git a/node_groups.tf b/node_groups.tf
index a2825ad5e1..a340faa215 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -38,7 +38,7 @@ module "eks_managed_node_group" {
cluster_name = aws_eks_cluster.this[0].name
cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, var.cluster_version)
- cluster_security_group_id = var.create_cluster_security_group ? aws_security_group.this[0].id : var.cluster_security_group_id
+ cluster_security_group_id = local.cluster_security_group_id
# EKS Managed Node Group
name = try(each.value.name, each.key)
@@ -195,7 +195,7 @@ module "self_managed_node_group" {
user_data = try(each.value.user_data, var.self_managed_node_group_defaults.user_data, null)
vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])
- cluster_security_group_id = var.create_cluster_security_group ? aws_security_group.this[0].id : var.cluster_security_group_id
+ cluster_security_group_id = local.cluster_security_group_id
default_version = try(each.value.default_version, var.self_managed_node_group_defaults.default_version, null)
update_default_version = try(each.value.update_default_version, var.self_managed_node_group_defaults.update_default_version, null)
disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null)
diff --git a/outputs.tf b/outputs.tf
index 5136c0ad30..5c684babdc 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -37,18 +37,37 @@ output "cluster_status" {
value = try(aws_eks_cluster.this[0].status, "")
}
-output "cluster_security_group_id" {
+output "cluster_primary_security_group_id" {
description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
}
################################################################################
-# Security Group
+# Cluster Security Group
################################################################################
output "cluster_security_group_arn" {
description = "Amazon Resource Name (ARN) of the cluster security group"
- value = try(aws_security_group.this[0].arn, "")
+ value = try(aws_security_group.cluster[0].arn, "")
+}
+
+output "cluster_security_group_id" {
+ description = "ID of the cluster security group"
+ value = try(aws_security_group.cluster[0].id, "")
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+ description = "Amazon Resource Name (ARN) of the node shared security group"
+ value = try(aws_security_group.node[0].arn, "")
+}
+
+output "node_security_group_id" {
+ description = "ID of the node shared security group"
+ value = try(aws_security_group.node[0].id, "")
}
################################################################################
diff --git a/variables.tf b/variables.tf
index 3849aebeaf..f8592938b4 100644
--- a/variables.tf
+++ b/variables.tf
@@ -119,11 +119,11 @@ variable "cloudwatch_log_group_kms_key_id" {
}
################################################################################
-# Security Group
+# Cluster Security Group
################################################################################
variable "create_cluster_security_group" {
- description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`"
+ description = "Whether to create a security group for the cluster or use the existing `cluster_security_group_id`"
type = bool
default = true
}
@@ -135,43 +135,77 @@ variable "vpc_id" {
}
variable "cluster_security_group_name" {
- description = "Name to use on cluster role created"
+ description = "Name to use on cluster security group created"
type = string
default = null
}
variable "cluster_security_group_use_name_prefix" {
- description = "Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix"
+ description = "Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix"
type = string
default = true
}
-variable "cluster_egress_cidrs" {
- description = "List of CIDR blocks that are permitted for cluster egress traffic"
- type = list(string)
- default = ["0.0.0.0/0"]
+variable "cluster_security_group_description" {
+ description = "Description of the cluster security group created"
+ type = string
+ default = "EKS cluster security group"
+}
+
+variable "cluster_additional_security_group_rules" {
+ description = "List of additional security group rules to add to the cluster security group created"
+ type = map(any)
+ default = {}
+}
+
+variable "cluster_security_group_tags" {
+ description = "A map of additional tags to add to the cluster security group created"
+ type = map(string)
+ default = {}
}
-variable "cluster_create_endpoint_private_access_sg_rule" {
- description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. If `true`, `cluster_endpoint_private_access_cidrs` and/or 'cluster_endpoint_private_access_sg' should be provided"
+################################################################################
+# Node Security Group
+################################################################################
+
+variable "create_node_security_group" {
+ description = "Whether to create a security group for the node groups or use the existing `node_security_group_id`"
type = bool
- default = false
+ default = true
}
-variable "cluster_endpoint_private_access_cidrs" {
- description = "List of CIDR blocks which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
- type = list(string)
- default = []
+variable "node_security_group_id" {
+ description = "ID of an existing security group to attach to the node groups created"
+ type = string
+ default = ""
}
-variable "cluster_endpoint_private_access_sg" {
- description = "List of security group IDs which can access the Amazon EKS private API server endpoint. `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`"
- type = list(string)
- default = []
+variable "node_security_group_name" {
+ description = "Name to use on node security group created"
+ type = string
+ default = null
}
-variable "cluster_security_group_tags" {
- description = "A map of additional tags to add to the cluster security group created"
+variable "node_security_group_use_name_prefix" {
+ description = "Determines whether node security group name (`node_security_group_name`) is used as a prefix"
+ type = string
+ default = true
+}
+
+variable "node_security_group_description" {
+ description = "Description of the node security group created"
+ type = string
+ default = "EKS node shared security group"
+}
+
+variable "node_additional_security_group_rules" {
+ description = "List of additional security group rules to add to the node security group created"
+ type = map(any)
+ default = {}
+}
+
+variable "node_security_group_tags" {
+ description = "A map of additional tags to add to the node security group created"
type = map(string)
default = {}
}
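
Opting out of the module-managed node security group mirrors the cluster-side flag; a minimal sketch with a hypothetical existing security group ID:

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name = "example"

  create_node_security_group = false
  node_security_group_id     = "sg-0123456789abcdef0" # hypothetical existing SG

  # ... other inputs omitted ...
}
```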
From c277f8983001ebad3f244d6979b74387a7813b6b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 15 Nov 2021 14:44:23 -0500
Subject: [PATCH 31/83] chore: troubleshooting minimum network connectivity
requirements
---
README.md | 1 -
examples/complete/README.md | 8 +-
examples/complete/main.tf | 136 +++++++------
main.tf | 81 +++++---
modules/eks-managed-node-group/README.md | 10 +-
modules/eks-managed-node-group/main.tf | 201 ++++++++++---------
modules/eks-managed-node-group/variables.tf | 33 ++-
modules/self-managed-node-group/README.md | 10 +-
modules/self-managed-node-group/main.tf | 194 ++++++++++--------
modules/self-managed-node-group/variables.tf | 33 ++-
node_groups.tf | 78 ++++---
variables.tf | 1 -
12 files changed, 439 insertions(+), 347 deletions(-)
diff --git a/README.md b/README.md
index 87c427e8ad..6422f10bbe 100644
--- a/README.md
+++ b/README.md
@@ -423,7 +423,6 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_iam_policy_document.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
diff --git a/examples/complete/README.md b/examples/complete/README.md
index c3bbde1c20..79ab97572e 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -28,9 +28,7 @@ Note that this example may create resources which cost money. Run `terraform des
## Providers
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.64 |
+No providers.
## Modules
@@ -45,9 +43,7 @@ Note that this example may create resources which cost money. Run `terraform des
## Resources
-| Name | Type |
-|------|------|
-| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+No resources.
## Inputs
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 1581583262..f243061967 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -32,7 +32,7 @@ module "eks" {
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
- vpc_security_group_ids = [aws_security_group.additional.id]
+ # vpc_security_group_ids = [aws_security_group.additional.id]
}
self_managed_node_groups = {
@@ -51,64 +51,64 @@ module "eks" {
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
# disk_size = 50
- vpc_security_group_ids = [aws_security_group.additional.id]
+ # vpc_security_group_ids = [aws_security_group.additional.id]
create_launch_template = true
}
eks_managed_node_groups = {
- # eks_mng = {
- # min_size = 1
- # max_size = 10
- # desired_size = 1
-
- # instance_types = ["t3.large"]
- # capacity_type = "SPOT"
- # labels = {
- # Environment = "test"
- # GithubRepo = "terraform-aws-eks"
- # GithubOrg = "terraform-aws-modules"
- # }
- # taints = {
- # dedicated = {
- # key = "dedicated"
- # value = "gpuGroup"
- # effect = "NO_SCHEDULE"
- # }
- # }
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
- # tags = {
- # ExtraTag = "example"
- # }
- # }
+ blue = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+ # taints = {
+ # dedicated = {
+ # key = "dedicated"
+ # value = "gpuGroup"
+ # effect = "NO_SCHEDULE"
+ # }
+ # }
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
+ tags = {
+ ExtraTag = "example"
+ }
+ }
}
# Fargate Profile(s)
fargate_profiles = {
- # default = {
- # fargate_profile_name = "default"
- # selectors = [
- # {
- # namespace = "kube-system"
- # labels = {
- # k8s-app = "kube-dns"
- # }
- # },
- # {
- # namespace = "default"
- # }
- # ]
-
- # tags = {
- # Owner = "test"
- # }
-
- # timeouts = {
- # create = "20m"
- # delete = "20m"
- # }
- # }
+ default = {
+ fargate_profile_name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
}
tags = local.tags
@@ -161,6 +161,10 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
+
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = "1"
@@ -174,18 +178,18 @@ module "vpc" {
tags = local.tags
}
-resource "aws_security_group" "additional" {
- name_prefix = "${local.name}-additional"
- vpc_id = module.vpc.vpc_id
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = [
- "10.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- ]
- }
-}
+# resource "aws_security_group" "additional" {
+# name_prefix = "${local.name}-additional"
+# vpc_id = module.vpc.vpc_id
+
+# ingress {
+# from_port = 22
+# to_port = 22
+# protocol = "tcp"
+# cidr_blocks = [
+# "10.0.0.0/8",
+# "172.16.0.0/12",
+# "192.168.0.0/16",
+# ]
+# }
+# }
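
If the additional security group above is uncommented again, it is re-attached through the node group defaults shown earlier in this file; a sketch of the relevant fragment:

```hcl
# Assumes `aws_security_group.additional` above is uncommented
eks_managed_node_group_defaults = {
  ami_type               = "AL2_x86_64"
  create_launch_template = true
  vpc_security_group_ids = [aws_security_group.additional.id]
}
```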
diff --git a/main.tf b/main.tf
index 55c6b7c1ba..f90c4cdf35 100644
--- a/main.tf
+++ b/main.tf
@@ -140,7 +140,6 @@ resource "aws_security_group_rule" "cluster" {
)
}
-
################################################################################
# Node Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
@@ -178,21 +177,37 @@ locals {
source_cluster_security_group = true
}
ingress_self_coredns_tcp = {
- description = "CoreDNS"
+ description = "Node to node CoreDNS"
protocol = "tcp"
from_port = 53
to_port = 53
type = "ingress"
self = true
}
+ egress_self_coredns_tcp = {
+ description = "Node to node CoreDNS"
+ protocol = "tcp"
+ from_port = 53
+ to_port = 53
+ type = "egress"
+ self = true
+ }
ingress_self_coredns_udp = {
- description = "CoreDNS"
+ description = "Node to node CoreDNS"
protocol = "udp"
from_port = 53
to_port = 53
type = "ingress"
self = true
}
+ egress_self_coredns_udp = {
+ description = "Node to node CoreDNS"
+ protocol = "udp"
+ from_port = 53
+ to_port = 53
+ type = "egress"
+ self = true
+ }
}
}
@@ -290,40 +305,40 @@ resource "aws_iam_role" "this" {
permissions_boundary = var.iam_role_permissions_boundary
force_detach_policies = true
- inline_policy {
- name = local.iam_role_name
- policy = data.aws_iam_policy_document.additional[0].json
- }
+ # inline_policy {
+ # name = local.iam_role_name
+ # policy = data.aws_iam_policy_document.additional[0].json
+ # }
tags = merge(var.tags, var.iam_role_tags)
}
-data "aws_iam_policy_document" "additional" {
- count = var.create && var.create_iam_role ? 1 : 0
-
- # Permissions required to create AWSServiceRoleForElasticLoadBalancing
- # service-linked role by EKS during ELB provisioning
- statement {
- sid = "ELBServiceLinkedRole"
- effect = "Allow"
- actions = [
- "ec2:DescribeAccountAttributes",
- "ec2:DescribeAddresses",
- "ec2:DescribeInternetGateways",
- "elasticloadbalancing:SetIpAddressType",
- "elasticloadbalancing:SetSubnets"
- ]
- resources = ["*"]
- }
-
- # # Deny permissions to logs:CreateLogGroup since it's created through Terraform
- # # in this module, and it causes issues during cleanup/deletion
- statement {
- effect = "Deny"
- actions = ["logs:CreateLogGroup"]
- resources = ["*"]
- }
-}
+# data "aws_iam_policy_document" "additional" {
+# count = var.create && var.create_iam_role ? 1 : 0
+
+# # Permissions required to create AWSServiceRoleForElasticLoadBalancing
+# # service-linked role by EKS during ELB provisioning
+# statement {
+# sid = "ELBServiceLinkedRole"
+# effect = "Allow"
+# actions = [
+# "ec2:DescribeAccountAttributes",
+# "ec2:DescribeAddresses",
+# "ec2:DescribeInternetGateways",
+# "elasticloadbalancing:SetIpAddressType",
+# "elasticloadbalancing:SetSubnets"
+# ]
+# resources = ["*"]
+# }
+
+# # Deny permissions to logs:CreateLogGroup since it's created through Terraform
+# # in this module, and it causes issues during cleanup/deletion
+# statement {
+# effect = "Deny"
+# actions = ["logs:CreateLogGroup"]
+# resources = ["*"]
+# }
+# }
# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index c344e286df..f0add1dd18 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -75,13 +75,7 @@ No modules.
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.all_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_coredns_tcp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_coredns_udp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_ephemeral_ports_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_https_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.self_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [cloudinit_config.eks_optimized_ami_user_data](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
@@ -151,8 +145,8 @@ No modules.
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `{}` | no |
| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS managed node group security group"` | no |
-| [security\_group\_egress\_cidr\_blocks](#input\_security\_group\_egress\_cidr\_blocks) | List of CIDR blocks that are permitted for security group egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
| [security\_group\_rules](#input\_security\_group\_rules) | Map of security group rules to add to the security group created | `map(any)` | | no |
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `string` | `true` | no |
| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
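The consolidated `security_group_rules` input (`map(any)`, see the inputs table above) takes the same rule shape as the root module's maps; an illustrative call, assuming the submodule mirrors the root module's optional attributes, with the source security group ID hypothetical:

```hcl
module "eks_managed_node_group" {
  source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"

  # ... cluster_name, subnet_ids, and other required inputs omitted ...

  security_group_rules = {
    ingress_cluster_https = {
      description              = "Cluster control plane to nodes on 443"
      protocol                 = "tcp"
      from_port                = 443
      to_port                  = 443
      type                     = "ingress"
      source_security_group_id = "sg-0123456789abcdef0" # hypothetical cluster SG
    }
  }
}
```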
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index b369aa4db7..ea1caeb74c 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -257,15 +257,9 @@ resource "aws_launch_template" "this" {
}
# Prevent premature access of security group roles and policies by pods that
- # require permissions on create/destroy that depend on workers.
+ # require permissions on create/destroy that depend on nodes
depends_on = [
- aws_security_group_rule.cluster_https_ingress,
- aws_security_group_rule.cluster_kubelet_ingress,
- aws_security_group_rule.cluster_coredns_tcp_ingress,
- aws_security_group_rule.cluster_coredns_udp_ingress,
- aws_security_group_rule.cluster_ephemeral_ports_ingress,
- aws_security_group_rule.self_ingress,
- aws_security_group_rule.all_egress,
+ aws_security_group_rule.this,
aws_iam_role_policy_attachment.this,
]
@@ -381,101 +375,120 @@ resource "aws_security_group" "this" {
tags = merge(
var.tags,
- {
- "Name" = local.security_group_name
- "kubernetes.io/cluster/${var.cluster_name}" = "owned"
- },
+ { "Name" = local.security_group_name },
var.security_group_tags
)
}
-# Ingress
-resource "aws_security_group_rule" "cluster_https_ingress" {
- count = local.create_security_group ? 1 : 0
-
- description = "Allow communication from cluster control plane on 443/HTTPS"
- protocol = "tcp"
- security_group_id = aws_security_group.this[0].id
- source_security_group_id = var.cluster_security_group_id
- from_port = 443
- to_port = 443
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_kubelet_ingress" {
- count = local.create_security_group ? 1 : 0
-
- description = "Allow communication from the cluster control plane to kubelet"
- protocol = "tcp"
- security_group_id = aws_security_group.this[0].id
- source_security_group_id = var.cluster_security_group_id
- from_port = 10250
- to_port = 10250
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_coredns_tcp_ingress" {
- count = local.create_security_group ? 1 : 0
-
- description = "Allow communication from cluster control plane on 53/TCP for CoreDNS"
- protocol = "tcp"
- security_group_id = aws_security_group.this[0].id
- source_security_group_id = var.cluster_security_group_id
- from_port = 53
- to_port = 53
- type = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_coredns_udp_ingress" {
- count = local.create_security_group ? 1 : 0
-
- description = "Allow communication from cluster control plane on 53/UDP for CoreDNS"
- protocol = "udp"
- security_group_id = aws_security_group.this[0].id
- source_security_group_id = var.cluster_security_group_id
- from_port = 53
- to_port = 53
- type = "ingress"
-}
+resource "aws_security_group_rule" "this" {
+ for_each = local.create_security_group ? var.security_group_rules : {}
-resource "aws_security_group_rule" "cluster_ephemeral_ports_ingress" {
- count = local.create_security_group ? 1 : 0
-
- description = "Allow communication from the cluster control plane on Linux ephemeral ports"
- protocol = "tcp"
- security_group_id = aws_security_group.this[0].id
- source_security_group_id = var.cluster_security_group_id
- from_port = 1025
- to_port = 65535
- type = "ingress"
-}
-
-# TODO - move to separate security group in root that all node groups will get assigned
-resource "aws_security_group_rule" "self_ingress" {
- count = local.create_security_group ? 1 : 0
-
- description = "Allow node to communicate with each other"
- protocol = "-1"
+ # Required
security_group_id = aws_security_group.this[0].id
- self = true
- from_port = 0
- to_port = 65535
- type = "ingress"
-}
-
-# Egress
-resource "aws_security_group_rule" "all_egress" {
- count = local.create_security_group ? 1 : 0
+ protocol = each.value.protocol
+ from_port = each.value.from_port
+ to_port = each.value.to_port
+ type = each.value.type
- description = "Allow egress to all ports/protocols"
- protocol = "-1"
- security_group_id = aws_security_group.this[0].id
- cidr_blocks = var.security_group_egress_cidr_blocks
- from_port = 0
- to_port = 65535
- type = "egress"
+ # Optional
+ description = try(each.value.description, null)
+ cidr_blocks = try(each.value.cidr_blocks, null)
+ ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
+ prefix_list_ids = try(each.value.prefix_list_ids, [])
+ self = try(each.value.self, null)
+ source_security_group_id = try(
+ each.value.source_security_group_id,
+ try(each.value.source_cluster_security_group, false) ? var.cluster_security_group_id : null
+ )
}
+# # Ingress
+# resource "aws_security_group_rule" "cluster_https_ingress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow communication from cluster control plane on 443/HTTPS"
+# protocol = "tcp"
+# security_group_id = aws_security_group.this[0].id
+# source_security_group_id = var.cluster_security_group_id
+# from_port = 443
+# to_port = 443
+# type = "ingress"
+# }
+
+# resource "aws_security_group_rule" "cluster_kubelet_ingress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow communication from the cluster control plane to kubelet"
+# protocol = "tcp"
+# security_group_id = aws_security_group.this[0].id
+# source_security_group_id = var.cluster_security_group_id
+# from_port = 10250
+# to_port = 10250
+# type = "ingress"
+# }
+
+# resource "aws_security_group_rule" "cluster_coredns_tcp_ingress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow communication from cluster control plane on 53/TCP for CoreDNS"
+# protocol = "tcp"
+# security_group_id = aws_security_group.this[0].id
+# source_security_group_id = var.cluster_security_group_id
+# from_port = 53
+# to_port = 53
+# type = "ingress"
+# }
+
+# resource "aws_security_group_rule" "cluster_coredns_udp_ingress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow communication from cluster control plane on 53/UDP for CoreDNS"
+# protocol = "udp"
+# security_group_id = aws_security_group.this[0].id
+# source_security_group_id = var.cluster_security_group_id
+# from_port = 53
+# to_port = 53
+# type = "ingress"
+# }
+
+# resource "aws_security_group_rule" "cluster_ephemeral_ports_ingress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow communication from the cluster control plane on Linux ephemeral ports"
+# protocol = "tcp"
+# security_group_id = aws_security_group.this[0].id
+# source_security_group_id = var.cluster_security_group_id
+# from_port = 1025
+# to_port = 65535
+# type = "ingress"
+# }
+
+# # TODO - move to separate security group in root that all node groups will get assigned
+# resource "aws_security_group_rule" "self_ingress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow node to communicate with each other"
+# protocol = "-1"
+# security_group_id = aws_security_group.this[0].id
+# self = true
+# from_port = 0
+# to_port = 65535
+# type = "ingress"
+# }
+
+# # Egress
+# resource "aws_security_group_rule" "all_egress" {
+# count = local.create_security_group ? 1 : 0
+
+# description = "Allow egress to all ports/protocols"
+# protocol = "-1"
+# security_group_id = aws_security_group.this[0].id
+# cidr_blocks = var.security_group_egress_cidr_blocks
+# from_port = 0
+# to_port = 65535
+# type = "egress"
+# }
+
################################################################################
# IAM Role
################################################################################
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 8a8a63068d..5b68fd1842 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -398,10 +398,35 @@ variable "vpc_id" {
default = null
}
-variable "security_group_egress_cidr_blocks" {
- description = "List of CIDR blocks that are permitted for security group egress traffic"
- type = list(string)
- default = ["0.0.0.0/0"]
+variable "security_group_rules" {
+ description = "List of security group rules to add to the security group created"
+ type = map(any)
+ default = {
+ egress_https_default = {
+ description = "Egress all HTTPS to internet"
+ protocol = "tcp"
+ from_port = 443
+ to_port = 443
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ egress_ntp_tcp_default = {
+ description = "Egress NTP/TCP to internet"
+ protocol = "tcp"
+ from_port = 123
+ to_port = 123
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ egress_ntp_udp_default = {
+ description = "Egress NTP/UDP to internet"
+ protocol = "udp"
+ from_port = 123
+ to_port = 123
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
}
variable "cluster_security_group_id" {
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 5453b7dc99..0330fa9336 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -41,13 +41,7 @@ No modules.
| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.all_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_coredns_tcp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_coredns_udp_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_ephemeral_ports_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_https_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_kubelet_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.self_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
@@ -122,8 +116,8 @@ No modules.
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
-| [security\_group\_egress\_cidr\_blocks](#input\_security\_group\_egress\_cidr\_blocks) | List of CIDR blocks that are permitted for security group egress traffic | `list(string)` | `["0.0.0.0/0"]` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
+| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no |
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `string` | `true` | no |
| [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 44d1b164eb..3810b02c19 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -4,6 +4,32 @@ data "aws_partition" "current" {}
# User Data
################################################################################
+locals {
+ platform = {
+ bottlerocket = {
+ user_data = var.custom_user_data != "" ? var.custom_user_data : base64encode(templatefile(
+ "${path.module}/../../templates/bottlerocket_user_data.tpl",
+ {
+ ami_id = var.ami_id
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ cluster_dns_ip = var.cluster_dns_ip
+ # Optional
+ bootstrap_extra_args = var.bootstrap_extra_args
+ }
+ ))
+ }
+ linux = {
+ user_data = var.custom_user_data != "" ? var.custom_user_data : try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, "")
+ }
+ # Not supported on EKS managed node groups
+ # windows = {}
+ }
+}
+
# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
# An important note is that user data must be in MIME multi-part archive format,
# as by default, EKS will merge the bootstrapping command required for nodes to join the
@@ -12,11 +38,13 @@ data "aws_partition" "current" {}
# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
data "cloudinit_config" "eks_optimized_ami_user_data" {
- count = var.create && (local.use_custom_launch_template && var.pre_bootstrap_user_data != "") || (var.ami_id != null && var.custom_ami_is_eks_optimized) ? 1 : 0
+ count = var.create && var.platform == "linux" && ((local.use_custom_launch_template && var.pre_bootstrap_user_data != "") || (var.ami_id != "" && var.ami_is_eks_optimized)) ? 1 : 0
- gzip = false
- boundary = "//"
+ base64_encode = true
+ gzip = false
+ boundary = "//"
+  # Prepend to existing user data supplied by AWS EKS
dynamic "part" {
for_each = var.pre_bootstrap_user_data != "" ? [1] : []
content {
@@ -25,11 +53,12 @@ data "cloudinit_config" "eks_optimized_ami_user_data" {
}
}
+ # Supply all of bootstrap user data due to custom AMI
dynamic "part" {
- for_each = var.ami_id != null && var.custom_ami_is_eks_optimized ? [1] : []
+ for_each = var.ami_id != "" && var.ami_is_eks_optimized ? [1] : []
content {
content_type = "text/x-shellscript"
- content = templatefile("${path.module}/../../templates/linux_user_data.sh.tpl",
+ content = templatefile("${path.module}/../../templates/linux_user_data.tpl",
{
# Required to bootstrap node
cluster_name = var.cluster_name
@@ -51,7 +80,7 @@ data "cloudinit_config" "eks_optimized_ami_user_data" {
################################################################################
locals {
- use_custom_launch_template = var.create_launch_template || var.launch_template_name != null
+ use_custom_launch_template = var.launch_template_name != ""
launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group")
}
@@ -67,7 +96,7 @@ resource "aws_launch_template" "this" {
# # Set on node group instead
# instance_type = var.launch_template_instance_type
key_name = var.key_name
- user_data = try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, var.custom_user_data)
+ user_data = local.platform[var.platform].user_data
vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
@@ -275,10 +304,11 @@ resource "aws_launch_template" "this" {
################################################################################
locals {
- launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name)
+ launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name, null)
# Change order to allow users to set version priority before using defaults
launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
}
+
resource "aws_eks_node_group" "this" {
count = var.create ? 1 : 0
@@ -298,9 +328,9 @@ resource "aws_eks_node_group" "this" {
node_group_name_prefix = var.use_name_prefix ? "${var.name}-" : null
# https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
- ami_type = var.ami_id != null ? null : var.ami_type
- release_version = var.ami_id != null ? null : var.ami_release_version
- version = var.ami_id != null ? null : var.cluster_version
+ ami_type = var.ami_id != "" ? null : var.ami_type
+ release_version = var.ami_id != "" ? null : var.ami_release_version
+ version = var.ami_id != "" ? null : var.cluster_version
capacity_type = var.capacity_type
disk_size = local.use_custom_launch_template ? null : var.disk_size # if using LT, set disk size on LT or else it will error here
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 8095e17e7f..caec1c569c 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -10,20 +10,26 @@ variable "tags" {
default = {}
}
+variable "platform" {
+ description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based"
+ type = string
+ default = "linux"
+}
+
################################################################################
# User Data
################################################################################
variable "custom_user_data" {
- description = "Base64-encoded user data used; should be used when `custom_ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster"
+ description = "Base64-encoded user data used; should be used when `ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster"
type = string
- default = null
+ default = ""
}
-variable "custom_ami_is_eks_optimized" {
- description = "Determines whether the custom AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not; if `true` then the module will add the boostrap user data"
+variable "ami_is_eks_optimized" {
+ description = "Determines whether the AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not; if `true` then the module will add the boostrap user data"
type = bool
- default = false
+ default = true
}
variable "cluster_endpoint" {
@@ -393,32 +399,7 @@ variable "vpc_id" {
variable "security_group_rules" {
description = "List of security group rules to add to the security group created"
type = any
- default = {
- egress_https_default = {
- description = "Egress all HTTPS to internet"
- protocol = "tcp"
- from_port = 443
- to_port = 443
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- }
- egress_ntp_tcp_default = {
- description = "Egress NTP/TCP to internet"
- protocol = "tcp"
- from_port = 123
- to_port = 123
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- }
- egress_ntp_udp_default = {
- description = "Egress NTP/UDP to internet"
- protocol = "udp"
- from_port = 123
- to_port = 123
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- }
- }
+ default = {}
}
variable "cluster_security_group_id" {
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 6b4dac7843..c6e129b5a9 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -72,7 +72,7 @@ No modules.
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
-| [custom\_user\_data](#input\_custom\_user\_data) | Base64-encoded user data used; should be used when `custom_ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster | `string` | `null` | no |
+| [custom\_user\_data](#input\_custom\_user\_data) | Base64-encoded user data; should be used when `ami_is_eks_optimized` = `false` to bootstrap and join instances to the cluster | `string` | `""` | no |
| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
@@ -121,6 +121,7 @@ No modules.
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
| [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no |
+| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
| [propagate\_tags](#input\_propagate\_tags) | A list of tag blocks. Each element should have keys named key, value, and propagate\_at\_launch | `list(map(string))` | `[]` | no |
@@ -129,7 +130,7 @@ No modules.
| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
-| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `[]` | no |
| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `["0.0.0.0/0"]` | no |
+| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different from IRSA | `any` | `{}` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
@@ -503,8 +507,10 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -512,6 +518,7 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index 6254dda70e..79b47e11ea 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -26,7 +26,7 @@ Note that this example may create resources which cost money. Run `terraform des
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.64 |
-| [random](#requirement\_random) | >= 2.0 |
+| [null](#requirement\_null) | >= 3.0 |
| [tls](#requirement\_tls) | >= 2.2 |
@@ -42,6 +42,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| [aws](#provider\_aws) | >= 3.64 |
+| [null](#provider\_null) | >= 3.0 |
| [tls](#provider\_tls) | >= 2.2 |
@@ -61,8 +62,10 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
+| [null_resource.patch_cni](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
@@ -72,8 +75,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -81,6 +86,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
index b05adc2aa3..4e6f00001a 100644
--- a/examples/bottlerocket/main.tf
+++ b/examples/bottlerocket/main.tf
@@ -30,16 +30,30 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ # Self Managed Node Group(s)
+ self_managed_node_group_defaults = {
+ update_default_version = true
+ }
+
self_managed_node_groups = {
- one = {
+ two = {
name = "bottlerocket-nodes"
- create_launch_template = true
- platform = "bottlerocket"
- ami_id = data.aws_ami.bottlerocket_ami.id
- instance_type = "m5.large"
- desired_size = 2
- key_name = aws_key_pair.this.key_name
+ platform = "bottlerocket"
+ ami_id = data.aws_ami.bottlerocket_ami.id
+ instance_type = "m5.large"
+ desired_size = 2
+ key_name = aws_key_pair.this.key_name
iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
@@ -64,6 +78,59 @@ module "eks" {
tags = local.tags
}
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = "${module.eks.cluster_id}"
+ cluster = {
+ certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
+ server = "${module.eks.cluster_endpoint}"
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = "${module.eks.cluster_id}"
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = "${data.aws_eks_cluster_auth.this.token}"
+ }
+ }]
+ })
+}
+
+resource "null_resource" "patch_cni" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = "kubectl patch configmap/aws-auth -n kube-system --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
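
For reference, a sketch of the kind of `mapRoles` entry the patched configmap carries for a node role (the account ID and role name below are hypothetical, invented for illustration):

locals {
  example_map_roles = yamlencode([
    {
      rolearn  = "arn:aws:iam::111122223333:role/example-node-role" # hypothetical
      username = "system:node:{{EC2PrivateDNSName}}"
      groups   = ["system:bootstrappers", "system:nodes"]
    }
  ])
}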
################################################################################
# Supporting Resources
################################################################################
@@ -87,10 +154,6 @@ resource "aws_key_pair" "this" {
public_key = tls_private_key.this.public_key_openssh
}
-################################################################################
-# Supporting Resources
-################################################################################
-
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
diff --git a/examples/bottlerocket/outputs.tf b/examples/bottlerocket/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/bottlerocket/outputs.tf
+++ b/examples/bottlerocket/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/bottlerocket/userdata.toml b/examples/bottlerocket/userdata.toml
deleted file mode 100644
index 85019675a6..0000000000
--- a/examples/bottlerocket/userdata.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-# https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#description-of-settings
-[settings.kubernetes]
-api-server = "${endpoint}"
-cluster-certificate = "${cluster_auth_base64}"
-cluster-name = "${cluster_name}"
-${additional_userdata}
-
-# Hardening based on https://github.com/bottlerocket-os/bottlerocket/blob/develop/SECURITY_GUIDANCE.md
-
-# Enable kernel lockdown in "integrity" mode.
-# This prevents modifications to the running kernel, even by privileged users.
-[settings.kernel]
-lockdown = "integrity"
-
-# The admin host container provides SSH access and runs with "superpowers".
-# It is disabled by default, but can be disabled explicitly.
-[settings.host-containers.admin]
-enabled = ${enable_admin_container}
-
-# The control host container provides out-of-band access via SSM.
-# It is enabled by default, and can be disabled if you do not expect to use SSM.
-# This could leave you with no way to access the API and change settings on an existing node!
-[settings.host-containers.control]
-enabled = ${enable_control_container}
diff --git a/examples/bottlerocket/versions.tf b/examples/bottlerocket/versions.tf
index 68cca1cfc8..48001da51b 100644
--- a/examples/bottlerocket/versions.tf
+++ b/examples/bottlerocket/versions.tf
@@ -7,9 +7,9 @@ terraform {
version = ">= 3.64"
}
- random = {
- source = "hashicorp/random"
- version = ">= 2.0"
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
}
tls = {
source = "hashicorp/tls"
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 50f0314f72..35f561faed 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -26,6 +26,7 @@ Note that this example may create resources which cost money. Run `terraform des
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.64 |
| [http](#requirement\_http) | >= 2.4.1 |
+| [null](#requirement\_null) | >= 3.0 |
## Providers
@@ -40,10 +44,14 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| [aws](#provider\_aws) | >= 3.64 |
+| [null](#provider\_null) | >= 3.0 |
## Modules
@@ -66,6 +74,8 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [null_resource.patch_cni](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
@@ -75,8 +85,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -84,6 +96,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index ddaa7eb74c..494191202b 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -21,42 +21,81 @@ locals {
module "eks" {
source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
+ enable_irsa = true
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
- vpc_security_group_ids = [aws_security_group.additional.id]
+ update_default_version = true
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
self_managed_node_groups = {
one = {
- name = "spot-1"
- override_instance_types = ["m5.large", "m5d.large", "m6i.large"]
- spot_instance_pools = 4
- asg_max_size = 5
- asg_desired_capacity = 5
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
- public_ip = true
+ name = "spot-1"
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "2"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "1"
+ },
+ ]
+ }
+
+ max_size = 5
+ desired_size = 5
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+ post_bootstrap_user_data = <<-EOT
+ cd /tmp
+ sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl enable amazon-ssm-agent
+ sudo systemctl start amazon-ssm-agent
+ EOT
+ public_ip = true
}
}
# EKS Managed Node Group(s)
eks_managed_node_group_defaults = {
- ami_type = "AL2_x86_64"
- # disk_size = 50
+ ami_type = "AL2_x86_64"
+ disk_size = 50
vpc_security_group_ids = [aws_security_group.additional.id]
create_launch_template = true
}
eks_managed_node_groups = {
- blue = {
+ blue = {}
+ green = {
min_size = 1
max_size = 10
desired_size = 1
@@ -83,7 +122,6 @@ module "eks" {
ExtraTag = "example"
}
}
- green = {}
}
# Fargate Profile(s)
@@ -145,72 +183,57 @@ module "disabled_self_managed_node_group" {
}
################################################################################
-# Kubernetes provider configuration
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################
-# data "aws_eks_cluster" "cluster" {
-# name = module.eks.cluster_id
-# }
-
-# data "aws_eks_cluster_auth" "cluster" {
-# name = module.eks.cluster_id
-# }
-
-# provider "kubernetes" {
-# host = data.aws_eks_cluster.cluster.endpoint
-# cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
-# token = data.aws_eks_cluster_auth.cluster.token
-# experiments {
-# manifest_resource = true
-# }
-# }
-
-# locals {
-# fargate_roles = [for k, v in module.eks.fargate_profiles :
-# {
-# rolearn = v.iam_role_arn
-# username = "system:node:{{SessionName}}"
-# groups = [
-# "system:bootstrappers",
-# "system:nodes",
-# "system:node-proxier",
-# ]
-# }
-# ]
-# linux_roles = [for k, v in module.eks.eks_managed_node_groups :
-# {
-# rolearn = v.iam_role_arn
-# username = "system:node:{{EC2PrivateDNSName}}"
-# groups = [
-# "system:bootstrappers",
-# "system:nodes",
-# ]
-# }
-# ]
-# }
-
-# data "http" "wait_for_cluster" {
-# url = "${module.eks.cluster_endpoint}/healthz"
-# ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
-# timeout = 300
-# }
-
-# resource "kubernetes_manifest" "aws_auth" {
-# manifest = {
-# apiVersion = "v1"
-# kind = "ConfigMap"
-# metadata = {
-# name = "aws-auth"
-# namespace = "kube-system"
-# }
-
-# data = {
-# mapRoles = yamlencode(concat(local.fargate_roles, local.linux_roles))
-# }
-# }
-
-# depends_on = [data.http.wait_for_cluster]
-# }
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = "${module.eks.cluster_id}"
+ cluster = {
+ certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
+ server = "${module.eks.cluster_endpoint}"
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = "${module.eks.cluster_id}"
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = "${data.aws_eks_cluster_auth.this.token}"
+ }
+ }]
+ })
+}
+
+resource "null_resource" "patch_cni" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = "kubectl patch configmap/aws-auth -n kube-system --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
################################################################################
# Supporting resources
diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf
index 7a3517e42a..8a9153c099 100644
--- a/examples/complete/outputs.tf
+++ b/examples/complete/outputs.tf
@@ -79,6 +79,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -119,3 +137,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf
index 99948dade5..142c367bee 100644
--- a/examples/complete/versions.tf
+++ b/examples/complete/versions.tf
@@ -7,6 +7,7 @@ terraform {
version = ">= 3.64"
}
http = {
source = "terraform-aws-modules/http"
version = ">= 2.4.1"
@@ -25,6 +26,11 @@ terraform {
source = "hashicorp/random"
version = ">= 2.1"
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+  }
}
}
}
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index ff741d3385..ddd7162b2f 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -59,8 +59,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -68,6 +70,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/eks_managed_node_group/outputs.tf
+++ b/examples/eks_managed_node_group/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
index 6cb698feb3..6eaff1432d 100644
--- a/examples/fargate/README.md
+++ b/examples/fargate/README.md
@@ -67,8 +67,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -76,6 +78,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/fargate/outputs.tf
+++ b/examples/fargate/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
index 9fc275cc63..b3ed0ade4b 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/instance_refresh/README.md
@@ -84,8 +84,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -93,6 +95,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/instance_refresh/outputs.tf
+++ b/examples/instance_refresh/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
index 04511cc589..d4dd0f0eac 100644
--- a/examples/irsa/README.md
+++ b/examples/irsa/README.md
@@ -71,8 +71,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -80,6 +82,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/irsa/outputs.tf b/examples/irsa/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/irsa/outputs.tf
+++ b/examples/irsa/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md
index 2af9fd993c..133b4316c7 100644
--- a/examples/secrets_encryption/README.md
+++ b/examples/secrets_encryption/README.md
@@ -63,8 +63,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -72,6 +74,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/secrets_encryption/outputs.tf
+++ b/examples/secrets_encryption/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/examples/self_managed_node_groups/README.md b/examples/self_managed_node_groups/README.md
index 68ffbed477..30056a9aee 100644
--- a/examples/self_managed_node_groups/README.md
+++ b/examples/self_managed_node_groups/README.md
@@ -57,8 +57,10 @@ No inputs.
| Name | Description |
|------|-------------|
+| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
@@ -66,6 +68,7 @@ No inputs.
| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
+| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
diff --git a/examples/self_managed_node_groups/outputs.tf b/examples/self_managed_node_groups/outputs.tf
index 2fb59838e5..3e9620157b 100644
--- a/examples/self_managed_node_groups/outputs.tf
+++ b/examples/self_managed_node_groups/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = module.eks.cluster_iam_role_unique_id
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = module.eks.cluster_identity_providers
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,12 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = module.eks.aws_auth_configmap_yaml
+}
diff --git a/main.tf b/main.tf
index 8c08cb6ebc..6b640218d2 100644
--- a/main.tf
+++ b/main.tf
@@ -142,6 +142,7 @@ resource "aws_security_group_rule" "cluster" {
################################################################################
# IRSA
+# Note - this is different from EKS identity provider
################################################################################
data "tls_certificate" "this" {
@@ -227,3 +228,44 @@ resource "aws_iam_role_policy_attachment" "this" {
policy_arn = each.value
role = aws_iam_role.this[0].name
}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+resource "aws_eks_addon" "this" {
+ for_each = { for k, v in var.cluster_addons : k => v if var.create }
+
+ cluster_name = aws_eks_cluster.this[0].name
+ addon_name = try(each.value.name, each.key)
+
+ addon_version = lookup(each.value, "addon_version", null)
+ resolve_conflicts = lookup(each.value, "resolve_conflicts", null)
+ service_account_role_arn = lookup(each.value, "service_account_role_arn", null)
+
+ tags = var.tags
+}
+
+################################################################################
+# EKS Identity Provider
+# Note - this is different from IRSA
+################################################################################
+
+resource "aws_eks_identity_provider_config" "this" {
+ for_each = { for k, v in var.cluster_identity_providers : k => v if var.create }
+
+ cluster_name = aws_eks_cluster.this[0].name
+
+ oidc {
+ client_id = each.value.client_id
+ groups_claim = lookup(each.value, "groups_claim", null)
+ groups_prefix = lookup(each.value, "groups_prefix", null)
+ identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
+ issuer_url = each.value.issuer_url
+ required_claims = lookup(each.value, "required_claims", null)
+ username_claim = lookup(each.value, "username_claim", null)
+ username_prefix = lookup(each.value, "username_prefix", null)
+ }
+
+ tags = var.tags
+}
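
For context, a minimal sketch of how the two inputs wired to these resources might be populated; the addon names are real EKS addons, but every version and identifier below is an illustrative placeholder, not a module default:

module "eks" {
  source = "terraform-aws-modules/eks/aws"

  # ... cluster_name, cluster_version, vpc_id, subnet_ids, etc. elided

  # Map keys are used as the addon name unless `name` is set explicitly
  cluster_addons = {
    coredns    = { resolve_conflicts = "OVERWRITE" }
    kube-proxy = {}
    vpc-cni    = { addon_version = "v1.10.1-eksbuild.1" } # placeholder version
  }

  # Map keys are used as the config name unless `identity_provider_config_name` is set
  cluster_identity_providers = {
    example = {
      client_id  = "example-client-id"        # placeholder
      issuer_url = "https://oidc.example.com" # placeholder
    }
  }
}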
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 3c9b0b6250..274acc3b4a 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -172,6 +172,7 @@ No modules.
| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
| [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
+| [platform](#output\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based |
| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group |
| [security\_group\_id](#output\_security\_group\_id) | ID of the security group |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 688e49607d..321efc233c 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -20,6 +20,17 @@ locals {
platform = {
bottlerocket = {
content_type = "application/toml"
+ user_data = var.platform == "bottlerocket" ? base64encode(templatefile("${path.module}/../../templates/${var.platform}_user_data.tpl",
+ {
+ ami_id = var.ami_id
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional
+ bootstrap_extra_args = var.bootstrap_extra_args
+ }
+ )) : ""
}
linux = {
content_type = "text/x-shellscript"
@@ -31,7 +42,7 @@ locals {
}
data "cloudinit_config" "eks_optimized_ami_user_data" {
- count = var.create && var.enable_bootstrap_user_data ? 1 : 0
+ count = var.create && var.enable_bootstrap_user_data && var.platform != "bottlerocket" ? 1 : 0
gzip = false
boundary = "//"
@@ -44,41 +55,20 @@ data "cloudinit_config" "eks_optimized_ami_user_data" {
}
}
- dynamic "part" {
- for_each = var.platform != "bottlerocket" ? [1] : []
- content {
- content_type = local.platform[var.platform].content_type
- content = templatefile("${path.module}/../../templates/${var.platform}_user_data.tpl",
- {
- ami_id = var.ami_id
- # Required to bootstrap node
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- # Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- post_bootstrap_user_data = var.post_bootstrap_user_data
- }
- )
- }
- }
-
- dynamic "part" {
- for_each = var.platform == "bottlerocket" ? [1] : []
- content {
- content_type = local.platform[var.platform].content_type
- content = templatefile("${path.module}/../../templates/${var.platform}_user_data.tpl",
- {
- ami_id = var.ami_id
- # Required to bootstrap node
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- # Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- }
- )
- }
+ part {
+ content_type = local.platform[var.platform].content_type
+ content = templatefile("${path.module}/../../templates/${var.platform}_user_data.tpl",
+ {
+ ami_id = "JustNeedsToBeSomethingToEnsureUserDataIsPopulated"
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional
+ bootstrap_extra_args = var.bootstrap_extra_args
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ }
+ )
}
}
@@ -101,7 +91,7 @@ resource "aws_launch_template" "this" {
image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id)
instance_type = var.instance_type
key_name = var.key_name
- user_data = try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, var.custom_user_data)
+ user_data = var.platform == "bottlerocket" ? local.platform.bottlerocket.user_data : try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, var.custom_user_data)
vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
@@ -308,9 +298,13 @@ resource "aws_autoscaling_group" "this" {
name = var.use_name_prefix ? null : var.name
name_prefix = var.use_name_prefix ? "${var.name}-" : null
- launch_template {
- name = local.launch_template_name
- version = local.launch_template_version
+ dynamic "launch_template" {
+ for_each = var.use_mixed_instances_policy ? [] : [1]
+
+ content {
+ name = local.launch_template_name
+ version = local.launch_template_version
+ }
}
availability_zones = var.availability_zones
@@ -375,7 +369,7 @@ resource "aws_autoscaling_group" "this" {
for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : []
content {
dynamic "instances_distribution" {
- for_each = lookup(mixed_instances_policy.value, "instances_distribution", null) != null ? [mixed_instances_policy.value.instances_distribution] : []
+ for_each = try([mixed_instances_policy.value.instances_distribution], [])
content {
on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null)
on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null)
@@ -393,7 +387,7 @@ resource "aws_autoscaling_group" "this" {
}
dynamic "override" {
- for_each = lookup(mixed_instances_policy.value, "override", null) != null ? mixed_instances_policy.value.override : []
+ for_each = try(mixed_instances_policy.value.override, [])
content {
instance_type = lookup(override.value, "instance_type", null)
weighted_capacity = lookup(override.value, "weighted_capacity", null)
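
The `try()`-based dynamic blocks above expect an optional `mixed_instances_policy` map shaped roughly as follows; this is a sketch, and the instance types, weights, and allocation strategy are illustrative values, not module defaults:

module "self_managed_node_group" {
  source = "./modules/self-managed-node-group"

  # ... required inputs (name, cluster_name, etc.) elided

  use_mixed_instances_policy = true
  mixed_instances_policy = {
    instances_distribution = {
      on_demand_base_capacity  = 0
      spot_allocation_strategy = "capacity-optimized" # assumed valid ASG strategy
    }
    override = [
      { instance_type = "m5.large", weighted_capacity = "1" },
      { instance_type = "m6i.large", weighted_capacity = "2" },
    ]
  }
}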
diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf
index 2187eb9661..983c92a80c 100644
--- a/modules/self-managed-node-group/outputs.tf
+++ b/modules/self-managed-node-group/outputs.tf
@@ -136,3 +136,12 @@ output "iam_instance_profile_unique" {
description = "Stable and unique string identifying the IAM instance profile"
value = try(aws_iam_instance_profile.this[0].unique_id, "")
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "platform" {
+ description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based"
+ value = var.platform
+}
diff --git a/node_groups.tf b/node_groups.tf
index a622684531..98107324df 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -334,7 +334,7 @@ module "self_managed_node_group" {
bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "")
# Launch Template
- create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, false)
+ create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
description = try(each.value.description, var.self_managed_node_group_defaults.description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group")
ebs_optimized = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null)
diff --git a/outputs.tf b/outputs.tf
index cb4e45bb15..72ba73d676 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -98,6 +98,24 @@ output "cluster_iam_role_unique_id" {
value = try(aws_iam_role.this[0].unique_id, "")
}
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+ description = "Map of attribute maps for all EKS cluster addons enabled"
+ value = aws_eks_addon.this
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+ description = "Map of attribute maps for all EKS identity providers enabled"
+ value = aws_eks_identity_provider_config.this
+}
+
################################################################################
# CloudWatch Log Group
################################################################################
@@ -138,3 +156,19 @@ output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.self_managed_node_group
}
+
+################################################################################
+# Additional
+################################################################################
+
+output "aws_auth_configmap_yaml" {
+ description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+ value = templatefile("${path.module}/templates/aws_auth_cm.tpl",
+ {
+ eks_managed_role_arns = [for group in module.eks_managed_node_group : group.iam_role_arn]
+ self_managed_role_arns = [for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"]
+ win32_self_managed_role_arns = [for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"]
+ fargate_profile_arns = [for group in module.fargate_profile : group.fargate_profile_arn]
+ }
+ )
+}
diff --git a/templates/aws_auth_cm.tpl b/templates/aws_auth_cm.tpl
new file mode 100644
index 0000000000..abf2102a54
--- /dev/null
+++ b/templates/aws_auth_cm.tpl
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aws-auth
+ namespace: kube-system
+data:
+ mapRoles: |
+%{ for role in eks_managed_role_arns ~}
+ - rolearn: ${role}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+%{ endfor ~}
+%{ for role in self_managed_role_arns ~}
+ - rolearn: ${role}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+%{ endfor ~}
+%{ for role in win32_self_managed_role_arns ~}
+ - rolearn: ${role}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - eks:kube-proxy-windows
+ - system:bootstrappers
+ - system:nodes
+%{ endfor ~}
+%{ for role in fargate_profile_arns ~}
+ - rolearn: ${role}
+ username: system:node:{{SessionName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - system:node-proxier
+%{ endfor ~}
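
One hedged sketch of consuming the rendered manifest without shelling out to kubectl: via the hashicorp/kubernetes provider, assuming that provider is already configured against the cluster. Note the `aws-auth` ConfigMap may already exist on clusters with EKS managed node groups, in which case patching (as the examples do) is the safer route:

resource "kubernetes_config_map" "aws_auth" {
  metadata {
    name      = "aws-auth"
    namespace = "kube-system"
  }

  # Pull just the mapRoles document out of the rendered ConfigMap manifest
  data = {
    mapRoles = yamldecode(module.eks.aws_auth_configmap_yaml)["data"]["mapRoles"]
  }
}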
diff --git a/templates/linux_user_data.tpl b/templates/linux_user_data.tpl
index e8ad408395..78f40d354d 100644
--- a/templates/linux_user_data.tpl
+++ b/templates/linux_user_data.tpl
@@ -1,7 +1,8 @@
-#!/bin/bash -e
+#!/bin/bash
+set -e
%{ if length(ami_id) > 0 ~}
-/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} \
- --apiserver-endpoint ${cluster_endpoint} \
- --b64-cluster-ca ${cluster_auth_base64}
+B64_CLUSTER_CA=${cluster_auth_base64}
+API_SERVER_URL=${cluster_endpoint}
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
%{ endif ~}
${post_bootstrap_user_data}
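
To sanity-check this template change locally, the rendered script can be inspected with `templatefile`; a minimal sketch, where every value is a placeholder (the `ami_id` only needs to be a non-empty string to trigger the bootstrap branch):

output "rendered_linux_user_data" {
  value = templatefile("${path.module}/templates/linux_user_data.tpl", {
    ami_id                   = "ami-12345678"
    cluster_name             = "example"
    cluster_endpoint         = "https://EXAMPLE.gr7.eu-west-1.eks.amazonaws.com"
    cluster_auth_base64      = "dGVzdA==" # placeholder base64 CA data
    bootstrap_extra_args     = ""
    post_bootstrap_user_data = "echo all done"
  })
}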
diff --git a/variables.tf b/variables.tf
index 53b0b44478..13101c7af6 100644
--- a/variables.tf
+++ b/variables.tf
@@ -265,6 +265,26 @@ variable "iam_role_tags" {
default = {}
}
+################################################################################
+# EKS Addons
+################################################################################
+
+variable "cluster_addons" {
+ description = "Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name`"
+ type = any
+ default = {}
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+variable "cluster_identity_providers" {
+ description = "Map of cluster identity provider configurations to enable for the cluster. Note - this is different from IRSA"
+ type = any
+ default = {}
+}
+
################################################################################
# Fargate
################################################################################
From 7f65baf050165e43b5bb9657e1664c3e02ba3af9 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 1 Dec 2021 17:06:27 -0500
Subject: [PATCH 44/83] chore: re-testing by running from scratch
---
examples/bottlerocket/main.tf | 187 ------------
examples/complete/README.md | 3 +-
examples/complete/main.tf | 19 +-
examples/eks_managed_node_group/README.md | 5 +-
examples/eks_managed_node_group/main.tf | 274 ++++++++++--------
examples/eks_managed_node_group/versions.tf | 6 +-
examples/fargate/README.md | 10 +-
examples/fargate/main.tf | 71 ++---
examples/secrets_encryption/main.tf | 98 -------
examples/secrets_encryption/outputs.tf | 167 -----------
.../README.md | 15 +-
.../main.tf | 118 +++++++-
.../outputs.tf | 0
.../variables.tf | 0
.../versions.tf | 0
examples/self_managed_node_groups/README.md | 84 ------
examples/self_managed_node_groups/outputs.tf | 167 -----------
.../self_managed_node_groups/variables.tf | 0
examples/user_data/README.md | 58 ++++
examples/user_data/main.tf | 110 +++++++
examples/user_data/outputs.tf | 41 +++
.../templates/bottlerocket_custom.tpl | 7 +
examples/user_data/templates/linux_custom.tpl | 8 +
.../user_data/templates/windows_custom.tpl | 9 +
.../variables.tf | 0
.../versions.tf | 0
modules/_user_data/README.md | 60 ++++
modules/_user_data/main.tf | 75 +++++
modules/_user_data/outputs.tf | 4 +
modules/_user_data/variables.tf | 65 +++++
modules/_user_data/versions.tf | 10 +
modules/self-managed-node-group/README.md | 2 +-
modules/self-managed-node-group/variables.tf | 4 +-
node_groups.tf | 2 +-
templates/bottlerocket_user_data.tpl | 5 +-
templates/linux_user_data.tpl | 7 +-
templates/windows_user_data.tpl | 2 -
37 files changed, 803 insertions(+), 890 deletions(-)
delete mode 100644 examples/bottlerocket/main.tf
delete mode 100644 examples/secrets_encryption/main.tf
delete mode 100644 examples/secrets_encryption/outputs.tf
rename examples/{bottlerocket => self_managed_node_group}/README.md (88%)
rename examples/{self_managed_node_groups => self_managed_node_group}/main.tf (71%)
rename examples/{bottlerocket => self_managed_node_group}/outputs.tf (100%)
rename examples/{bottlerocket => self_managed_node_group}/variables.tf (100%)
rename examples/{bottlerocket => self_managed_node_group}/versions.tf (100%)
delete mode 100644 examples/self_managed_node_groups/README.md
delete mode 100644 examples/self_managed_node_groups/outputs.tf
delete mode 100644 examples/self_managed_node_groups/variables.tf
create mode 100644 examples/user_data/README.md
create mode 100644 examples/user_data/main.tf
create mode 100644 examples/user_data/outputs.tf
create mode 100644 examples/user_data/templates/bottlerocket_custom.tpl
create mode 100644 examples/user_data/templates/linux_custom.tpl
create mode 100644 examples/user_data/templates/windows_custom.tpl
rename examples/{secrets_encryption => user_data}/variables.tf (100%)
rename examples/{self_managed_node_groups => user_data}/versions.tf (100%)
create mode 100644 modules/_user_data/README.md
create mode 100644 modules/_user_data/main.tf
create mode 100644 modules/_user_data/outputs.tf
create mode 100644 modules/_user_data/variables.tf
create mode 100644 modules/_user_data/versions.tf
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
deleted file mode 100644
index 4e6f00001a..0000000000
--- a/examples/bottlerocket/main.tf
+++ /dev/null
@@ -1,187 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_version = "1.21"
- region = "eu-west-1"
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- cluster_addons = {
- coredns = {
- resolve_conflicts = "OVERWRITE"
- }
- kube-proxy = {}
- vpc-cni = {
- resolve_conflicts = "OVERWRITE"
- }
- }
-
- # Self Managed Node Group(s)
- self_managed_node_group_defaults = {
- update_default_version = true
- }
-
- self_managed_node_groups = {
- two = {
- name = "bottlerocket-nodes"
-
- platform = "bottlerocket"
- ami_id = data.aws_ami.bottlerocket_ami.id
- instance_type = "m5.large"
- desired_size = 2
- key_name = aws_key_pair.this.key_name
-
- iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
-
- bootstrap_extra_args = <<-EOT
- # The admin host container provides SSH access and runs with "superpowers".
- # It is disabled by default, but can be disabled explicitly.
- [settings.host-containers.admin]
- enabled = false
-
- # The control host container provides out-of-band access via SSM.
- # It is enabled by default, and can be disabled if you do not expect to use SSM.
- # This could leave you with no way to access the API and change settings on an existing node!
- [settings.host-containers.control]
- enabled = true
-
- [settings.kubernetes.node-labels]
- ingress = "allowed"
- EOT
- }
- }
-
- tags = local.tags
-}
-
-################################################################################
-# aws-auth configmap
-# Only EKS managed node groups automatically add roles to aws-auth configmap
-# so we need to ensure fargate profiles and self-managed node roles are added
-################################################################################
-
-data "aws_eks_cluster_auth" "this" {
- name = module.eks.cluster_id
-}
-
-locals {
- kubeconfig = yamlencode({
- apiVersion = "v1"
- kind = "Config"
- current-context = "terraform"
- clusters = [{
- name = "${module.eks.cluster_id}"
- cluster = {
- certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
- server = "${module.eks.cluster_endpoint}"
- }
- }]
- contexts = [{
- name = "terraform"
- context = {
- cluster = "${module.eks.cluster_id}"
- user = "terraform"
- }
- }]
- users = [{
- name = "terraform"
- user = {
- token = "${data.aws_eks_cluster_auth.this.token}"
- }
- }]
- })
-}
-
-resource "null_resource" "patch_cni" {
- triggers = {
- kubeconfig = base64encode(local.kubeconfig)
- cmd_patch = "kubectl patch configmap/aws-auth -n kube-system --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
- }
-
- provisioner "local-exec" {
- interpreter = ["/bin/bash", "-c"]
- environment = {
- KUBECONFIG = self.triggers.kubeconfig
- }
- command = self.triggers.cmd_patch
- }
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-data "aws_ami" "bottlerocket_ami" {
- most_recent = true
- owners = ["amazon"]
-
- filter {
- name = "name"
- values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
- }
-}
-
-resource "tls_private_key" "this" {
- algorithm = "RSA"
-}
-
-resource "aws_key_pair" "this" {
- key_name = local.name
- public_key = tls_private_key.this.public_key_openssh
-}
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
-
- azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- enable_flow_log = true
- create_flow_log_cloudwatch_iam_role = true
- create_flow_log_cloudwatch_log_group = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
- }
-
- tags = local.tags
-}
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 35f561faed..10937ebdd3 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -73,8 +73,9 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [null_resource.patch_cni](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 494191202b..6817b3868a 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -36,6 +36,13 @@ module "eks" {
}
}
+ cluster_encryption_config = [
+ {
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }
+ ]
+
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
@@ -220,10 +227,10 @@ locals {
})
}
-resource "null_resource" "patch_cni" {
+resource "null_resource" "patch" {
triggers = {
kubeconfig = base64encode(local.kubeconfig)
- cmd_patch = "kubectl patch configmap/aws-auth -n kube-system --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ cmd_patch = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
}
provisioner "local-exec" {
@@ -288,3 +295,11 @@ resource "aws_security_group" "additional" {
tags = local.tags
}
+
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index ddd7162b2f..e89c61f8bb 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -26,13 +26,14 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
| [aws](#requirement\_aws) | >= 3.64 |
-| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+| [null](#requirement\_null) | >= 3.0 |
## Providers
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.64 |
+| [null](#provider\_null) | >= 3.0 |
## Modules
@@ -48,7 +49,9 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 137e74fcb7..3464532456 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -34,86 +34,80 @@ module "eks" {
ami_type = "AL2_x86_64"
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
- # fail deploy early helps with development
- timeouts = {
- create = "8m"
- update = "40m"
- delete = "12m"
- }
}
eks_managed_node_groups = {
- # # Default node group - as provided by AWS EKS
- # default_node_group = {}
-
- # # Default node group - as provided by AWS EKS using Bottlerocket
- # bottlerocket_default = {
- # ami_type = "BOTTLEROCKET_x86_64"
- # }
-
- # # Adds to the AWS provided user data
- # bottlerocket_add = {
- # ami_type = "BOTTLEROCKET_x86_64"
-
- # create_launch_template = true
- # launch_template_name = "bottlerocket-custom"
- # update_default_version = true
-
- # # this will get added to what AWS provides
- # bootstrap_extra_args = <<-EOT
- # [settings.kernel]
- # lockdown = "integrity"
- # EOT
- # }
-
- # # Custom AMI, using module provided bootstrap data
- # bottlerocket_custom = {
- # # Current bottlerocket AMI
- # ami_id = "ami-0ff61e0bcfc81dc94"
- # platform = "bottlerocket"
-
- # create_launch_template = true
- # launch_template_name = "bottlerocket-custom"
- # update_default_version = true
-
- # bootstrap_extra_args = <<-EOT
- # [settings.kernel]
- # lockdown = "integrity"
-
- # [settings.kubernetes.node-labels]
- # "label1" = "foo"
- # "label2" = "bar"
-
- # [settings.kubernetes.node-taints]
- # "dedicated" = "experimental:PreferNoSchedule"
- # "special" = "true:NoSchedule"
- # EOT
- # }
-
- # # Use existing/external launch template
- # external_lt = {
- # create_launch_template = false
- # launch_template_name = aws_launch_template.external.name
- # launch_template_version = aws_launch_template.external.default_version
- # }
-
- # # Use a custom AMI
- # custom_ami = {
- # create_launch_template = true
- # launch_template_name = "custom-ami"
-
- # # Current default AMI used by managed node groups - pseudo "custom"
- # ami_id = "ami-0caf35bc73450c396"
- # # This will ensure the boostrap user data is used to join the node
- # # By default, EKS managed node groups will not append bootstrap script;
- # # this adds it back in if its an EKS optmized AMI derivative
- # ami_is_eks_optimized = true
- # }
+ # Default node group - as provided by AWS EKS
+ default_node_group = {}
+
+ # Default node group - as provided by AWS EKS using Bottlerocket
+ bottlerocket_default = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ }
+
+ # Adds to the AWS provided user data
+ bottlerocket_add = {
+ ami_type = "BOTTLEROCKET_x86_64"
+
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
+ update_default_version = true
+
+ # this will get added to what AWS provides
+ bootstrap_extra_args = <<-EOT
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
+
+ # Custom AMI, using module provided bootstrap data
+ bottlerocket_custom = {
+ # Current bottlerocket AMI
+ ami_id = "ami-0ff61e0bcfc81dc94"
+ platform = "bottlerocket"
+
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
+ update_default_version = true
+
+ bootstrap_extra_args = <<-EOT
+ [settings.kernel]
+ lockdown = "integrity"
+
+ [settings.kubernetes.node-labels]
+ "label1" = "foo"
+ "label2" = "bar"
+
+ [settings.kubernetes.node-taints]
+ "dedicated" = "experimental:PreferNoSchedule"
+ "special" = "true:NoSchedule"
+ EOT
+ }
+
+ # Use existing/external launch template
+ external_lt = {
+ create_launch_template = false
+ launch_template_name = aws_launch_template.external.name
+ launch_template_version = aws_launch_template.external.default_version
+ }
+
+ # Use a custom AMI
+ custom_ami = {
+ create_launch_template = true
+ launch_template_name = "custom-ami"
+
+ # Current default AMI used by managed node groups - pseudo "custom"
+ ami_id = "ami-0caf35bc73450c396"
+      # This will ensure the bootstrap user data is used to join the node
+      # By default, EKS managed node groups will not append the bootstrap script;
+      # this adds it back in if it's an EKS optimized AMI derivative
+ ami_is_eks_optimized = true
+ }
# Complete
complete = {
name = "complete-eks-mng"
- use_name_prefix = true
+ use_name_prefix = false
subnet_ids = module.vpc.private_subnets
@@ -144,13 +138,13 @@ module "eks" {
GithubOrg = "terraform-aws-modules"
}
- # taints = [
- # {
- # key = "dedicated"
- # value = "gpuGroup"
- # effect = "NO_SCHEDULE"
- # }
- # ]
+ taints = [
+ {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ ]
# TODO - this is throwing an error
# update_config = {
@@ -168,20 +162,20 @@ module "eks" {
disable_api_termination = false
enable_monitoring = true
- # block_device_mappings = {
- # xvda = {
- # device_name = "/dev/xvda"
- # ebs = {
- # volume_size = 75
- # volume_type = "gp3"
- # iops = 3000
- # throughput = 150
- # # encrypted = true
- # # kms_key_id = aws_kms_key.ebs.arn
- # delete_on_termination = true
- # }
- # }
- # }
+ block_device_mappings = {
+ xvda = {
+ device_name = "/dev/xvda"
+ ebs = {
+ volume_size = 75
+ volume_type = "gp3"
+ iops = 3000
+ throughput = 150
+ encrypted = true
+ kms_key_id = aws_kms_key.ebs.arn
+ delete_on_termination = true
+ }
+ }
+ }
metadata_options = {
http_endpoint = "enabled"
@@ -189,19 +183,16 @@ module "eks" {
http_put_response_hop_limit = 2
}
- # TODO - something commented is not letting node to come up successfully
- #
- # create_iam_role = true
- # iam_role_name = "eks-managed-node-group-complete-example"
- # iam_role_use_name_prefix = false
- # iam_role_path = "/eks/"
- # iam_role_description = "EKS managed node group complete example role"
- # iam_role_tags = {
- # Purpose = "Protector of the kubelet"
- # }
- # iam_role_additional_policies = [
- # "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
- # ]
+ create_iam_role = true
+ iam_role_name = "eks-managed-node-group-complete-example"
+ iam_role_use_name_prefix = false
+ iam_role_description = "EKS managed node group complete example role"
+ iam_role_tags = {
+ Purpose = "Protector of the kubelet"
+ }
+ iam_role_additional_policies = [
+ "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+ ]
create_security_group = true
security_group_name = "eks-managed-node-group-complete-example"
@@ -230,21 +221,68 @@ module "eks" {
Purpose = "Protector of the kubelet"
}
- # timeouts = {
- # create = "8m"
- # update = "8m"
- # delete = "8m"
- # }
-
- # tags = {
- # ExtraTag = "EKS managed node group complete example"
- # }
+ tags = {
+ ExtraTag = "EKS managed node group complete example"
+ }
}
}
tags = local.tags
}
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = "${module.eks.cluster_id}"
+ cluster = {
+ certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
+ server = "${module.eks.cluster_endpoint}"
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = "${module.eks.cluster_id}"
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = "${data.aws_eks_cluster_auth.this.token}"
+ }
+ }]
+ })
+}
+
+resource "null_resource" "patch" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
################################################################################
# Supporting Resources
################################################################################
@@ -402,7 +440,6 @@ resource "aws_launch_template" "external" {
# (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
# user_data = base64encode(data.template_file.launch_template_userdata.rendered)
- # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
tag_specifications {
resource_type = "instance"
@@ -411,7 +448,6 @@ resource "aws_launch_template" "external" {
}
}
- # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC)
tag_specifications {
resource_type = "volume"
@@ -420,7 +456,6 @@ resource "aws_launch_template" "external" {
}
}
- # Supplying custom tags to EKS instances ENI's is another use-case for LaunchTemplates
tag_specifications {
resource_type = "network-interface"
@@ -429,7 +464,6 @@ resource "aws_launch_template" "external" {
}
}
- # Tag the LT itself
tags = {
CustomTag = "Launch template custom tag"
}
diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf
index 48492037e2..adfd0180d4 100644
--- a/examples/eks_managed_node_group/versions.tf
+++ b/examples/eks_managed_node_group/versions.tf
@@ -6,9 +6,9 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.64"
}
- cloudinit = {
- source = "hashicorp/cloudinit"
- version = ">= 2.0"
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
}
}
}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
index 6eaff1432d..388ec18833 100644
--- a/examples/fargate/README.md
+++ b/examples/fargate/README.md
@@ -28,6 +28,7 @@ Note that this example may create resources which cost money. Run `terraform des
## Providers
-No providers.
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.64 |
## Modules
@@ -57,7 +63,9 @@ No providers.
## Resources
-No resources.
+| Name | Type |
+|------|------|
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
## Inputs
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
index ae799b85fe..b490788d6e 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate/main.tf
@@ -21,14 +21,32 @@ locals {
module "eks" {
source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ cluster_encryption_config = [
+ {
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }
+ ]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
+ enable_irsa = true
# You need a node group to schedule CoreDNS, which is critical for internal DNS to work correctly.
# If you want to use only Fargate, you must follow the docs `(Optional) Update CoreDNS`
@@ -46,10 +64,6 @@ module "eks" {
tags = {
ExtraTag = "example"
}
- # TODO - this is throwing an error
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
}
}
@@ -106,39 +120,6 @@ module "eks" {
tags = local.tags
}
-##############################################
-# Calling submodule with existing EKS cluster
-##############################################
-
-# module "fargate_profile_existing_cluster" {
-# source = "../../modules/fargate-profile"
-
-# cluster_name = module.eks.cluster_id
-# subnet_ids = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
-
-
-# fargate_profile_name = "profile1"
-# selectors = [
-# {
-# namespace = "kube-system"
-# labels = {
-# k8s-app = "kube-dns"
-# }
-# },
-# {
-# namespace = "profile"
-# labels = {
-# WorkerType = "fargate"
-# }
-# }
-# ]
-
-# tags = merge(local.tags, {
-# Owner = "profile1"
-# submodule = "true"
-# })
-# }
-
################################################################################
# Supporting Resources
################################################################################
@@ -174,3 +155,11 @@ module "vpc" {
tags = local.tags
}
+
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
deleted file mode 100644
index e57a848a34..0000000000
--- a/examples/secrets_encryption/main.tf
+++ /dev/null
@@ -1,98 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_version = "1.21"
- region = "eu-west-1"
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- cluster_encryption_config = [
- {
- provider_key_arn = aws_kms_key.eks.arn
- resources = ["secrets"]
- }
- ]
-
- self_managed_node_groups = {
- one = {
- name = "worker-group-1"
- instance_type = "t3.small"
- post_bootstrap_user_data = "echo foo bar"
- desired_size = 2
- },
- }
-
- tags = local.tags
-}
-
-################################################################################
-# KMS for encrypting secrets
-################################################################################
-
-resource "aws_kms_key" "eks" {
- description = "EKS Secret Encryption Key"
- deletion_window_in_days = 7
- enable_key_rotation = true
-
- tags = local.tags
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
-
- azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- enable_flow_log = true
- create_flow_log_cloudwatch_iam_role = true
- create_flow_log_cloudwatch_log_group = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
- }
-
- tags = local.tags
-}
diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf
deleted file mode 100644
index 3e9620157b..0000000000
--- a/examples/secrets_encryption/outputs.tf
+++ /dev/null
@@ -1,167 +0,0 @@
-################################################################################
-# Cluster
-################################################################################
-
-output "cluster_arn" {
- description = "The Amazon Resource Name (ARN) of the cluster"
- value = module.eks.cluster_arn
-}
-
-output "cluster_certificate_authority_data" {
- description = "Base64 encoded certificate data required to communicate with the cluster"
- value = module.eks.cluster_certificate_authority_data
-}
-
-output "cluster_endpoint" {
- description = "Endpoint for your Kubernetes API server"
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_id" {
- description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
- value = module.eks.cluster_id
-}
-
-output "cluster_oidc_issuer_url" {
- description = "The URL on the EKS cluster for the OpenID Connect identity provider"
- value = module.eks.cluster_oidc_issuer_url
-}
-
-output "cluster_platform_version" {
- description = "Platform version for the cluster"
- value = module.eks.cluster_platform_version
-}
-
-output "cluster_status" {
- description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
- value = module.eks.cluster_status
-}
-
-output "cluster_primary_security_group_id" {
- description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
- value = module.eks.cluster_primary_security_group_id
-}
-
-################################################################################
-# Security Group
-################################################################################
-
-output "cluster_security_group_arn" {
- description = "Amazon Resource Name (ARN) of the cluster security group"
- value = module.eks.cluster_security_group_arn
-}
-
-output "cluster_security_group_id" {
- description = "ID of the cluster security group"
- value = module.eks.cluster_security_group_id
-}
-
-################################################################################
-# Node Security Group
-################################################################################
-
-output "node_security_group_arn" {
- description = "Amazon Resource Name (ARN) of the node shared security group"
- value = module.eks.node_security_group_arn
-}
-
-output "node_security_group_id" {
- description = "ID of the node shared security group"
- value = module.eks.node_security_group_id
-}
-
-################################################################################
-# IRSA
-################################################################################
-
-output "oidc_provider_arn" {
- description = "The ARN of the OIDC Provider if `enable_irsa = true`"
- value = module.eks.oidc_provider_arn
-}
-
-################################################################################
-# IAM Role
-################################################################################
-
-output "cluster_iam_role_name" {
- description = "IAM role name of the EKS cluster"
- value = module.eks.cluster_iam_role_name
-}
-
-output "cluster_iam_role_arn" {
- description = "IAM role ARN of the EKS cluster"
- value = module.eks.cluster_iam_role_arn
-}
-
-output "cluster_iam_role_unique_id" {
- description = "Stable and unique string identifying the IAM role"
- value = module.eks.cluster_iam_role_unique_id
-}
-
-################################################################################
-# EKS Addons
-################################################################################
-
-output "cluster_addons" {
- description = "Map of attribute maps for all EKS cluster addons enabled"
- value = module.eks.cluster_addons
-}
-
-################################################################################
-# EKS Identity Provider
-################################################################################
-
-output "cluster_identity_providers" {
- description = "Map of attribute maps for all EKS identity providers enabled"
- value = module.eks.cluster_identity_providers
-}
-
-################################################################################
-# CloudWatch Log Group
-################################################################################
-
-output "cloudwatch_log_group_name" {
- description = "Name of cloudwatch log group created"
- value = module.eks.cloudwatch_log_group_name
-}
-
-output "cloudwatch_log_group_arn" {
- description = "Arn of cloudwatch log group created"
- value = module.eks.cloudwatch_log_group_arn
-}
-
-################################################################################
-# Fargate Profile
-################################################################################
-
-output "fargate_profiles" {
- description = "Map of attribute maps for all EKS Fargate Profiles created"
- value = module.eks.fargate_profiles
-}
-
-################################################################################
-# EKS Managed Node Group
-################################################################################
-
-output "eks_managed_node_groups" {
- description = "Map of attribute maps for all EKS managed node groups created"
- value = module.eks.eks_managed_node_groups
-}
-
-################################################################################
-# Self Managed Node Group
-################################################################################
-
-output "self_managed_node_groups" {
- description = "Map of attribute maps for all self managed node groups created"
- value = module.eks.self_managed_node_groups
-}
-
-################################################################################
-# Additional
-################################################################################
-
-output "aws_auth_configmap_yaml" {
- description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
- value = module.eks.aws_auth_configmap_yaml
-}
diff --git a/examples/bottlerocket/README.md b/examples/self_managed_node_group/README.md
similarity index 88%
rename from examples/bottlerocket/README.md
rename to examples/self_managed_node_group/README.md
index 79b47e11ea..1ae48de39f 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/self_managed_node_group/README.md
@@ -1,10 +1,11 @@
-# AWS EKS cluster running Bottlerocket AMI
+# Managed groups example
-Configuration in this directory creates EKS cluster with workers group running [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket)
+This is an EKS example using the managed node groups feature in two different ways:
-This is a minimalistic example which shows what knobs to turn to make Bottlerocket work.
+- Using SPOT instances in a node group
+- Using ON_DEMAND instances in a node group
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html) for more details.
+See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
## Usage
@@ -62,10 +63,14 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
-| [null_resource.patch_cni](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
diff --git a/examples/self_managed_node_groups/main.tf b/examples/self_managed_node_group/main.tf
similarity index 71%
rename from examples/self_managed_node_groups/main.tf
rename to examples/self_managed_node_group/main.tf
index 385214d2ce..54fd3303d4 100644
--- a/examples/self_managed_node_groups/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -30,14 +30,54 @@ module "eks" {
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
self_managed_node_group_defaults = {
- disk_size = 50
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
}
self_managed_node_groups = {
# Default node group - as provisioned by the module defaults
default_node_group = {}
+ # Bottlerocket node group
+ bottlerocket = {
+ name = "bottlerocket-self-mng"
+
+ platform = "bottlerocket"
+ ami_id = data.aws_ami.bottlerocket_ami.id
+ instance_type = "m5.large"
+ desired_size = 2
+ key_name = aws_key_pair.this.key_name
+
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+
+ bootstrap_extra_args = <<-EOT
+ # The admin host container provides SSH access and runs with "superpowers".
+        # It is disabled by default, but can be enabled explicitly.
+ [settings.host-containers.admin]
+ enabled = false
+
+ # The control host container provides out-of-band access via SSM.
+ # It is enabled by default, and can be disabled if you do not expect to use SSM.
+ # This could leave you with no way to access the API and change settings on an existing node!
+ [settings.host-containers.control]
+ enabled = true
+
+ [settings.kubernetes.node-labels]
+ ingress = "allowed"
+ EOT
+ }
+
# Complete
complete = {
name = "complete-self-mng"
@@ -111,7 +151,6 @@ module "eks" {
create_iam_role = true
iam_role_name = "self-managed-node-group-complete-example"
iam_role_use_name_prefix = false
- iam_role_path = "/self/"
iam_role_description = "Self managed node group complete example role"
iam_role_tags = {
Purpose = "Protector of the kubelet"
@@ -161,6 +200,62 @@ module "eks" {
tags = local.tags
}
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = "${module.eks.cluster_id}"
+ cluster = {
+        certificate-authority-data = module.eks.cluster_certificate_authority_data
+        server = module.eks.cluster_endpoint
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = "${module.eks.cluster_id}"
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = "${data.aws_eks_cluster_auth.this.token}"
+ }
+ }]
+ })
+}
+
+resource "null_resource" "apply" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = <<-EOT
+ kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ EOT
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
################################################################################
# Supporting Resources
################################################################################
@@ -215,6 +310,25 @@ resource "aws_security_group" "additional" {
tags = local.tags
}
+data "aws_ami" "bottlerocket_ami" {
+ most_recent = true
+ owners = ["amazon"]
+
+ filter {
+ name = "name"
+ values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
+ }
+}
+
+resource "tls_private_key" "this" {
+ algorithm = "RSA"
+}
+
+resource "aws_key_pair" "this" {
+ key_name = local.name
+ public_key = tls_private_key.this.public_key_openssh
+}
+
data "aws_caller_identity" "current" {}
resource "aws_kms_key" "ebs" {
diff --git a/examples/bottlerocket/outputs.tf b/examples/self_managed_node_group/outputs.tf
similarity index 100%
rename from examples/bottlerocket/outputs.tf
rename to examples/self_managed_node_group/outputs.tf
diff --git a/examples/bottlerocket/variables.tf b/examples/self_managed_node_group/variables.tf
similarity index 100%
rename from examples/bottlerocket/variables.tf
rename to examples/self_managed_node_group/variables.tf
diff --git a/examples/bottlerocket/versions.tf b/examples/self_managed_node_group/versions.tf
similarity index 100%
rename from examples/bottlerocket/versions.tf
rename to examples/self_managed_node_group/versions.tf
diff --git a/examples/self_managed_node_groups/README.md b/examples/self_managed_node_groups/README.md
deleted file mode 100644
index 30056a9aee..0000000000
--- a/examples/self_managed_node_groups/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Managed groups example
-
-This is EKS example using managed groups feature in two different ways:
-
-- Using SPOT instances in node group
-- Using ON_DEMAND instance in node group
-
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
-
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-```
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.64 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.64 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [eks](#module\_eks) | ../.. | n/a |
-| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
-| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-
-## Inputs
-
-No inputs.
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
-| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
-| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
-| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
-| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
-| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
-| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
-| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
-| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
-| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
-| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
-| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
-| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
-| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
-| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
-| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
-| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
-| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
-| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
-
diff --git a/examples/self_managed_node_groups/outputs.tf b/examples/self_managed_node_groups/outputs.tf
deleted file mode 100644
index 3e9620157b..0000000000
--- a/examples/self_managed_node_groups/outputs.tf
+++ /dev/null
@@ -1,167 +0,0 @@
-################################################################################
-# Cluster
-################################################################################
-
-output "cluster_arn" {
- description = "The Amazon Resource Name (ARN) of the cluster"
- value = module.eks.cluster_arn
-}
-
-output "cluster_certificate_authority_data" {
- description = "Base64 encoded certificate data required to communicate with the cluster"
- value = module.eks.cluster_certificate_authority_data
-}
-
-output "cluster_endpoint" {
- description = "Endpoint for your Kubernetes API server"
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_id" {
- description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
- value = module.eks.cluster_id
-}
-
-output "cluster_oidc_issuer_url" {
- description = "The URL on the EKS cluster for the OpenID Connect identity provider"
- value = module.eks.cluster_oidc_issuer_url
-}
-
-output "cluster_platform_version" {
- description = "Platform version for the cluster"
- value = module.eks.cluster_platform_version
-}
-
-output "cluster_status" {
- description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
- value = module.eks.cluster_status
-}
-
-output "cluster_primary_security_group_id" {
- description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
- value = module.eks.cluster_primary_security_group_id
-}
-
-################################################################################
-# Security Group
-################################################################################
-
-output "cluster_security_group_arn" {
- description = "Amazon Resource Name (ARN) of the cluster security group"
- value = module.eks.cluster_security_group_arn
-}
-
-output "cluster_security_group_id" {
- description = "ID of the cluster security group"
- value = module.eks.cluster_security_group_id
-}
-
-################################################################################
-# Node Security Group
-################################################################################
-
-output "node_security_group_arn" {
- description = "Amazon Resource Name (ARN) of the node shared security group"
- value = module.eks.node_security_group_arn
-}
-
-output "node_security_group_id" {
- description = "ID of the node shared security group"
- value = module.eks.node_security_group_id
-}
-
-################################################################################
-# IRSA
-################################################################################
-
-output "oidc_provider_arn" {
- description = "The ARN of the OIDC Provider if `enable_irsa = true`"
- value = module.eks.oidc_provider_arn
-}
-
-################################################################################
-# IAM Role
-################################################################################
-
-output "cluster_iam_role_name" {
- description = "IAM role name of the EKS cluster"
- value = module.eks.cluster_iam_role_name
-}
-
-output "cluster_iam_role_arn" {
- description = "IAM role ARN of the EKS cluster"
- value = module.eks.cluster_iam_role_arn
-}
-
-output "cluster_iam_role_unique_id" {
- description = "Stable and unique string identifying the IAM role"
- value = module.eks.cluster_iam_role_unique_id
-}
-
-################################################################################
-# EKS Addons
-################################################################################
-
-output "cluster_addons" {
- description = "Map of attribute maps for all EKS cluster addons enabled"
- value = module.eks.cluster_addons
-}
-
-################################################################################
-# EKS Identity Provider
-################################################################################
-
-output "cluster_identity_providers" {
- description = "Map of attribute maps for all EKS identity providers enabled"
- value = module.eks.cluster_identity_providers
-}
-
-################################################################################
-# CloudWatch Log Group
-################################################################################
-
-output "cloudwatch_log_group_name" {
- description = "Name of cloudwatch log group created"
- value = module.eks.cloudwatch_log_group_name
-}
-
-output "cloudwatch_log_group_arn" {
- description = "Arn of cloudwatch log group created"
- value = module.eks.cloudwatch_log_group_arn
-}
-
-################################################################################
-# Fargate Profile
-################################################################################
-
-output "fargate_profiles" {
- description = "Map of attribute maps for all EKS Fargate Profiles created"
- value = module.eks.fargate_profiles
-}
-
-################################################################################
-# EKS Managed Node Group
-################################################################################
-
-output "eks_managed_node_groups" {
- description = "Map of attribute maps for all EKS managed node groups created"
- value = module.eks.eks_managed_node_groups
-}
-
-################################################################################
-# Self Managed Node Group
-################################################################################
-
-output "self_managed_node_groups" {
- description = "Map of attribute maps for all self managed node groups created"
- value = module.eks.self_managed_node_groups
-}
-
-################################################################################
-# Additional
-################################################################################
-
-output "aws_auth_configmap_yaml" {
- description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
- value = module.eks.aws_auth_configmap_yaml
-}
diff --git a/examples/self_managed_node_groups/variables.tf b/examples/self_managed_node_groups/variables.tf
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/examples/user_data/README.md b/examples/user_data/README.md
new file mode 100644
index 0000000000..c28bf2fdf7
--- /dev/null
+++ b/examples/user_data/README.md
@@ -0,0 +1,58 @@
+# Internal User Data Module
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.64 |
+
+## Providers
+
+No providers.
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks\_mng\_bottlerocket\_additional](#module\_eks\_mng\_bottlerocket\_additional) | ../../modules/_user_data | n/a |
+| [eks\_mng\_bottlerocket\_custom\_ami](#module\_eks\_mng\_bottlerocket\_custom\_ami) | ../../modules/_user_data | n/a |
+| [eks\_mng\_bottlerocket\_custom\_template](#module\_eks\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
+| [eks\_mng\_bottlerocket\_no\_op](#module\_eks\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_additional](#module\_eks\_mng\_linux\_additional) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_custom\_ami](#module\_eks\_mng\_linux\_custom\_ami) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_custom\_template](#module\_eks\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
+| [eks\_mng\_linux\_no\_op](#module\_eks\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
+
+## Resources
+
+No resources.
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [eks\_mng\_bottlerocket\_additional](#output\_eks\_mng\_bottlerocket\_additional) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_bottlerocket\_custom\_ami](#output\_eks\_mng\_bottlerocket\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_bottlerocket\_custom\_template](#output\_eks\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_bottlerocket\_no\_op](#output\_eks\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_additional](#output\_eks\_mng\_linux\_additional) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_custom\_ami](#output\_eks\_mng\_linux\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_custom\_template](#output\_eks\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [eks\_mng\_linux\_no\_op](#output\_eks\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+
diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf
new file mode 100644
index 0000000000..432043d1f6
--- /dev/null
+++ b/examples/user_data/main.tf
@@ -0,0 +1,110 @@
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+
+ cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+ cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+}
+
+################################################################################
+# User Data Module
+################################################################################
+
+# EKS managed node group - linux
+module "eks_mng_linux_no_op" {
+ source = "../../modules/_user_data"
+}
+
+module "eks_mng_linux_additional" {
+ source = "../../modules/_user_data"
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+}
+
+module "eks_mng_linux_custom_ami" {
+ source = "../../modules/_user_data"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ is_custom_ami = true
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+}
+
+
+module "eks_mng_linux_custom_template" {
+ source = "../../modules/_user_data"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/linux_custom.tpl"
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+}
+
+# EKS managed node group - bottlerocket
+module "eks_mng_bottlerocket_no_op" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+}
+
+module "eks_mng_bottlerocket_additional" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+module "eks_mng_bottlerocket_custom_ami" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ is_custom_ami = true
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+module "eks_mng_bottlerocket_custom_template" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
diff --git a/examples/user_data/outputs.tf b/examples/user_data/outputs.tf
new file mode 100644
index 0000000000..d9f1f5cbf2
--- /dev/null
+++ b/examples/user_data/outputs.tf
@@ -0,0 +1,41 @@
+# EKS managed node group - linux
+output "eks_mng_linux_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_no_op.user_data)
+}
+
+output "eks_mng_linux_additional" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_additional.user_data)
+}
+
+output "eks_mng_linux_custom_ami" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_custom_ami.user_data)
+}
+
+output "eks_mng_linux_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_linux_custom_template.user_data)
+}
+
+# EKS managed node group - bottlerocket
+output "eks_mng_bottlerocket_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_no_op.user_data)
+}
+
+output "eks_mng_bottlerocket_additional" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_additional.user_data)
+}
+
+output "eks_mng_bottlerocket_custom_ami" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_custom_ami.user_data)
+}
+
+output "eks_mng_bottlerocket_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.eks_mng_bottlerocket_custom_template.user_data)
+}
diff --git a/examples/user_data/templates/bottlerocket_custom.tpl b/examples/user_data/templates/bottlerocket_custom.tpl
new file mode 100644
index 0000000000..6c4d9434a7
--- /dev/null
+++ b/examples/user_data/templates/bottlerocket_custom.tpl
@@ -0,0 +1,7 @@
+# Custom user data template provided for rendering
+[settings.kubernetes]
+"cluster-name" = "${cluster_name}"
+"api-server" = "${cluster_endpoint}"
+"cluster-certificate" = "${cluster_auth_base64}"
+
+${bootstrap_extra_args~}
diff --git a/examples/user_data/templates/linux_custom.tpl b/examples/user_data/templates/linux_custom.tpl
new file mode 100644
index 0000000000..c1c4955bc8
--- /dev/null
+++ b/examples/user_data/templates/linux_custom.tpl
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -ex
+
+# Custom user data template provided for rendering
+B64_CLUSTER_CA=${cluster_auth_base64}
+API_SERVER_URL=${cluster_endpoint}
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
+${post_bootstrap_user_data}
diff --git a/examples/user_data/templates/windows_custom.tpl b/examples/user_data/templates/windows_custom.tpl
new file mode 100644
index 0000000000..c76c27a63e
--- /dev/null
+++ b/examples/user_data/templates/windows_custom.tpl
@@ -0,0 +1,9 @@
+
+${pre_bootstrap_user_data}
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+${post_bootstrap_user_data}
+
diff --git a/examples/secrets_encryption/variables.tf b/examples/user_data/variables.tf
similarity index 100%
rename from examples/secrets_encryption/variables.tf
rename to examples/user_data/variables.tf
diff --git a/examples/self_managed_node_groups/versions.tf b/examples/user_data/versions.tf
similarity index 100%
rename from examples/self_managed_node_groups/versions.tf
rename to examples/user_data/versions.tf
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
new file mode 100644
index 0000000000..38dab46a36
--- /dev/null
+++ b/modules/_user_data/README.md
@@ -0,0 +1,60 @@
+# Internal User Data Module
+
+Configuration in this directory renders the user data necessary for launching nodes.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [cloudinit](#provider\_cloudinit) | >= 2.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [cloudinit_config.linux_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [create](#input\_create) | Determines whether to create user data or not | `bool` | `true` | no |
+| [is\_custom\_ami](#input\_is\_custom\_ami) | Determines whether a custom AMI is used, in place of the default AMI for EKS managed node groups | `bool` | `false` | no |
+| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group | `bool` | `true` | no |
+| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
+| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
+| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
+| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [user\_data](#output\_user\_data) | Base64 encoded user data rendered for the provided inputs |
+
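The README above documents the module's inputs but stops short of an invocation. A minimal sketch of calling the internal module directly, where every value is an illustrative placeholder rather than anything taken from the patch:

```hcl
module "linux_user_data" {
  source = "../../modules/_user_data"

  platform = "linux"

  # Cluster details are only interpolated when a bootstrap script is rendered
  cluster_name        = "example"                                         # placeholder
  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
  cluster_auth_base64 = "LS0tLS1CRUdJTi..."                               # placeholder

  # Injected ahead of the EKS bootstrap phase
  pre_bootstrap_user_data = <<-EOT
    echo "foo"
    export FOO=bar
  EOT
}

# The module returns base64 encoded user data; decode it to inspect the result
output "linux_user_data_rendered" {
  value = base64decode(module.linux_user_data.user_data)
}
```

With the defaults shown in the inputs table (`is_eks_managed_node_group = true`, `is_custom_ami = false`), this exercises the `cloudinit_config` path that prepends the pre-bootstrap snippet to the user data EKS supplies.
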
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
new file mode 100644
index 0000000000..f9e453ce35
--- /dev/null
+++ b/modules/_user_data/main.tf
@@ -0,0 +1,75 @@
+
+locals {
+ int_linux_default_user_data = var.create && var.platform == "linux" && (var.is_custom_ami || var.user_data_template_path != "") ? base64encode(templatefile(
+ coalesce(var.user_data_template_path, "${path.module}/../../templates/linux_user_data.tpl"),
+ {
+ # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+ is_custom_ami = var.is_custom_ami
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional
+ bootstrap_extra_args = var.bootstrap_extra_args
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ }
+ )) : ""
+ platform = {
+ bottlerocket = {
+ user_data = var.create && var.platform == "bottlerocket" ? base64encode(templatefile(
+ coalesce(var.user_data_template_path, "${path.module}/../../templates/bottlerocket_user_data.tpl"),
+ {
+ # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+ is_custom_ami = var.is_custom_ami
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional - is appended if using EKS managed node group without custom AMI
+ bootstrap_extra_args = var.bootstrap_extra_args
+ }
+ )) : ""
+ }
+ linux = {
+ user_data = try(data.cloudinit_config.linux_eks_managed_node_group[0].rendered, local.int_linux_default_user_data)
+
+ }
+ windows = {
+ user_data = var.create && var.platform == "windows" ? base64encode(templatefile(
+ coalesce(var.user_data_template_path, "${path.module}/../../templates/windows_user_data.tpl"),
+ {
+ # Required to bootstrap node
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+ # Optional - is appended if using EKS managed node group without custom AMI
+ bootstrap_extra_args = var.bootstrap_extra_args
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ }
+ )) : ""
+ }
+ }
+}
+
+# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
+# An important note is that user data must in MIME multi-part archive format,
+# as by default, EKS will merge the bootstrapping command required for nodes to join the
+# cluster with your user data. If you use a custom AMI in your launch template,
+# this merging will NOT happen and you are responsible for nodes joining the cluster.
+# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
+
+data "cloudinit_config" "linux_eks_managed_node_group" {
+ count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.is_custom_ami && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0
+
+ base64_encode = true
+ gzip = false
+ boundary = "//"
+
+  # Prepend to existing user data supplied by AWS EKS
+ part {
+ content_type = "text/x-shellscript"
+ content = var.pre_bootstrap_user_data
+ }
+}
diff --git a/modules/_user_data/outputs.tf b/modules/_user_data/outputs.tf
new file mode 100644
index 0000000000..c2a569b05b
--- /dev/null
+++ b/modules/_user_data/outputs.tf
@@ -0,0 +1,4 @@
+output "user_data" {
+ description = "Base64 encoded user data rendered for the provided inputs"
+ value = try(local.platform[var.platform].user_data, "")
+}
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
new file mode 100644
index 0000000000..04a157c6df
--- /dev/null
+++ b/modules/_user_data/variables.tf
@@ -0,0 +1,65 @@
+variable "create" {
+ description = "Determines whether to create EKS managed node group or not"
+ type = bool
+ default = true
+}
+
+variable "platform" {
+ description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based"
+ type = string
+ default = "linux"
+}
+
+variable "is_custom_ami" {
+ description = "Determines whether a custom AMI is used or the default AMI in the case of EKS managed node groups"
+ type = bool
+ default = false
+}
+
+variable "is_eks_managed_node_group" {
+ description = "Determines whether the user data is used on nodes in an EKS managed node group"
+ type = bool
+ default = true
+}
+
+variable "cluster_name" {
+ description = "Name of the EKS cluster and default name (prefix) used throughout the resources created"
+ type = string
+ default = ""
+}
+
+variable "cluster_endpoint" {
+ description = "Endpoint of associated EKS cluster"
+ type = string
+ default = ""
+}
+
+variable "cluster_auth_base64" {
+ description = "Base64 encoded CA of associated EKS cluster"
+ type = string
+ default = ""
+}
+
+variable "pre_bootstrap_user_data" {
+ description = "User data that is injected into the user data script ahead of the EKS bootstrap script"
+ type = string
+ default = ""
+}
+
+variable "post_bootstrap_user_data" {
+ description = "User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative"
+ type = string
+ default = ""
+}
+
+variable "bootstrap_extra_args" {
+ description = "Additional arguments passed to the bootstrap script"
+ type = string
+ default = ""
+}
+
+variable "user_data_template_path" {
+ description = "Path to a local, custom user data template file to use when rendering user data"
+ type = string
+ default = ""
+}
diff --git a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf
new file mode 100644
index 0000000000..e293dc67ce
--- /dev/null
+++ b/modules/_user_data/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ cloudinit = {
+ source = "hashicorp/cloudinit"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 274acc3b4a..04c643fe7e 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -55,7 +55,7 @@ No modules.
|------|-------------|------|---------|:--------:|
| [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance | `string` | `""` | no |
| [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids` | `list(string)` | `null` | no |
-| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `list(any)` | `[]` | no |
+| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no |
| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 7838b21ef7..cfaae9f089 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -330,8 +330,8 @@ variable "ram_disk_id" {
variable "block_device_mappings" {
description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
- type = list(any)
- default = []
+ type = any
+ default = {}
}
variable "capacity_reservation_specification" {
diff --git a/node_groups.tf b/node_groups.tf
index 98107324df..eb36396f9d 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -232,7 +232,7 @@ module "eks_managed_node_group" {
kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null)
ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null)
- block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, [])
+ block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, {})
capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.eks_managed_node_group_defaults.capacity_reservation_specification, null)
cpu_options = try(each.value.cpu_options, var.eks_managed_node_group_defaults.cpu_options, null)
credit_specification = try(each.value.credit_specification, var.eks_managed_node_group_defaults.credit_specification, null)
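Since `block_device_mappings` moves from a `list(any)` to an `any`/map type here, node group definitions now pass a map of maps instead of a list. A hedged sketch of the new shape (the device name and EBS settings are illustrative, not taken from the patch):

```hcl
block_device_mappings = {
  # Keyed by a stable, user-chosen name rather than by list position
  xvda = {
    device_name = "/dev/xvda"
    ebs = {
      volume_size           = 75
      volume_type           = "gp3"
      encrypted             = true
      delete_on_termination = true
    }
  }
}
```

Keying by name rather than list index means adding or removing a mapping no longer reshuffles the remaining entries in plans.
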
diff --git a/templates/bottlerocket_user_data.tpl b/templates/bottlerocket_user_data.tpl
index ac4043bfc5..dc10dcb41f 100644
--- a/templates/bottlerocket_user_data.tpl
+++ b/templates/bottlerocket_user_data.tpl
@@ -1,8 +1,7 @@
-%{ if length(ami_id) > 0 ~}
+%{ if is_custom_ami ~}
[settings.kubernetes]
"cluster-name" = "${cluster_name}"
"api-server" = "${cluster_endpoint}"
"cluster-certificate" = "${cluster_auth_base64}"
%{ endif ~}
-
-${bootstrap_extra_args}
+${bootstrap_extra_args ~}
diff --git a/templates/linux_user_data.tpl b/templates/linux_user_data.tpl
index 78f40d354d..63b06a1a42 100644
--- a/templates/linux_user_data.tpl
+++ b/templates/linux_user_data.tpl
@@ -1,8 +1,11 @@
+%{ if is_custom_ami ~}
#!/bin/bash
set -e
-%{ if length(ami_id) > 0 ~}
+%{ endif ~}
+${pre_bootstrap_user_data ~}
+%{ if is_custom_ami ~}
B64_CLUSTER_CA=${cluster_auth_base64}
API_SERVER_URL=${cluster_endpoint}
/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
-%{ endif ~}
${post_bootstrap_user_data}
+%{ endif ~}
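To sanity-check the reworked conditional, the template can be rendered in isolation with `templatefile`; a sketch assuming illustrative values for every variable the template references:

```hcl
output "rendered_linux_user_data" {
  value = templatefile("${path.module}/templates/linux_user_data.tpl", {
    is_custom_ami            = true
    cluster_name             = "example"                                         # placeholder
    cluster_endpoint         = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
    cluster_auth_base64      = "LS0tLS1CRUdJTi..."                               # placeholder
    bootstrap_extra_args     = ""
    pre_bootstrap_user_data  = "echo foo"
    post_bootstrap_user_data = "echo done"
  })
}
```

With `is_custom_ami = true` the output contains the shebang, the pre-bootstrap snippet, the `/etc/eks/bootstrap.sh` call, and the post-bootstrap snippet; with `false`, only the pre-bootstrap snippet remains.
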
diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl
index 057e89aa62..c76c27a63e 100644
--- a/templates/windows_user_data.tpl
+++ b/templates/windows_user_data.tpl
@@ -1,11 +1,9 @@
${pre_bootstrap_user_data}
-%{ if length(ami_id) > 0 ~}
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
-%{ endif ~}
${post_bootstrap_user_data}
From 500f9dbce8c10ca8201db10fd7932f67403e6654 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Thu, 2 Dec 2021 17:08:02 -0500
Subject: [PATCH 45/83] chore: finish testing and updating of internal user
data module
---
examples/user_data/README.md | 18 ++
examples/user_data/main.tf | 181 +++++++++++++++++-
examples/user_data/outputs.tf | 48 +++++
examples/user_data/templates/linux_custom.tpl | 4 +-
.../user_data/templates/windows_custom.tpl | 5 +-
modules/_user_data/README.md | 2 +-
modules/_user_data/main.tf | 12 +-
modules/_user_data/variables.tf | 4 +-
templates/bottlerocket_user_data.tpl | 2 +-
templates/linux_user_data.tpl | 6 +-
templates/windows_user_data.tpl | 4 +-
11 files changed, 266 insertions(+), 20 deletions(-)
diff --git a/examples/user_data/README.md b/examples/user_data/README.md
index c28bf2fdf7..7d43a530c2 100644
--- a/examples/user_data/README.md
+++ b/examples/user_data/README.md
@@ -34,6 +34,15 @@ No providers.
| [eks\_mng\_linux\_custom\_ami](#module\_eks\_mng\_linux\_custom\_ami) | ../../modules/_user_data | n/a |
| [eks\_mng\_linux\_custom\_template](#module\_eks\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
| [eks\_mng\_linux\_no\_op](#module\_eks\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
+| [self\_mng\_bottlerocket\_bootstrap](#module\_self\_mng\_bottlerocket\_bootstrap) | ../../modules/_user_data | n/a |
+| [self\_mng\_bottlerocket\_custom\_template](#module\_self\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
+| [self\_mng\_bottlerocket\_no\_op](#module\_self\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
+| [self\_mng\_linux\_bootstrap](#module\_self\_mng\_linux\_bootstrap) | ../../modules/_user_data | n/a |
+| [self\_mng\_linux\_custom\_template](#module\_self\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
+| [self\_mng\_linux\_no\_op](#module\_self\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
+| [self\_mng\_windows\_bootstrap](#module\_self\_mng\_windows\_bootstrap) | ../../modules/_user_data | n/a |
+| [self\_mng\_windows\_custom\_template](#module\_self\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a |
+| [self\_mng\_windows\_no\_op](#module\_self\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a |
## Resources
@@ -55,4 +64,13 @@ No inputs.
| [eks\_mng\_linux\_custom\_ami](#output\_eks\_mng\_linux\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
| [eks\_mng\_linux\_custom\_template](#output\_eks\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
| [eks\_mng\_linux\_no\_op](#output\_eks\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_bottlerocket\_bootstrap](#output\_self\_mng\_bottlerocket\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_bottlerocket\_custom\_template](#output\_self\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_bottlerocket\_no\_op](#output\_self\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_linux\_bootstrap](#output\_self\_mng\_linux\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_linux\_custom\_template](#output\_self\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_linux\_no\_op](#output\_self\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_windows\_bootstrap](#output\_self\_mng\_windows\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_windows\_custom\_template](#output\_self\_mng\_windows\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
+| [self\_mng\_windows\_no\_op](#output\_self\_mng\_windows\_no\_op) | Base64 decoded user data rendered for the provided inputs |
diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf
index 432043d1f6..9c572b42ab 100644
--- a/examples/user_data/main.tf
+++ b/examples/user_data/main.tf
@@ -21,6 +21,12 @@ module "eks_mng_linux_additional" {
echo "foo"
export FOO=bar
EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
}
module "eks_mng_linux_custom_ami" {
@@ -30,12 +36,18 @@ module "eks_mng_linux_custom_ami" {
cluster_endpoint = local.cluster_endpoint
cluster_auth_base64 = local.cluster_auth_base64
- is_custom_ami = true
+ enable_bootstrap_user_data = true
pre_bootstrap_user_data = <<-EOT
echo "foo"
export FOO=bar
EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
}
@@ -52,6 +64,12 @@ module "eks_mng_linux_custom_template" {
echo "foo"
export FOO=bar
EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
}
# EKS managed node group - bottlerocket
@@ -82,7 +100,7 @@ module "eks_mng_bottlerocket_custom_ami" {
cluster_endpoint = local.cluster_endpoint
cluster_auth_base64 = local.cluster_auth_base64
- is_custom_ami = true
+ enable_bootstrap_user_data = true
bootstrap_extra_args = <<-EOT
# extra args added
@@ -108,3 +126,162 @@ module "eks_mng_bottlerocket_custom_template" {
lockdown = "integrity"
EOT
}
+
+# Self managed node group - linux
+module "self_mng_linux_no_op" {
+ source = "../../modules/_user_data"
+
+ is_eks_managed_node_group = false
+}
+
+module "self_mng_linux_bootstrap" {
+ source = "../../modules/_user_data"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+module "self_mng_linux_custom_template" {
+ source = "../../modules/_user_data"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/linux_custom.tpl"
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ echo "All done"
+ EOT
+}
+
+# Self managed node group - bottlerocket
+module "self_mng_bottlerocket_no_op" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ is_eks_managed_node_group = false
+}
+
+module "self_mng_bottlerocket_bootstrap" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+module "self_mng_bottlerocket_custom_template" {
+ source = "../../modules/_user_data"
+
+ platform = "bottlerocket"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
+
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+}
+
+# Self managed node group - windows
+module "self_mng_windows_no_op" {
+ source = "../../modules/_user_data"
+
+ platform = "windows"
+
+ is_eks_managed_node_group = false
+}
+
+module "self_mng_windows_bootstrap" {
+ source = "../../modules/_user_data"
+
+ platform = "windows"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ pre_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+ bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+ post_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+}
+
+module "self_mng_windows_custom_template" {
+ source = "../../modules/_user_data"
+
+ platform = "windows"
+
+ enable_bootstrap_user_data = true
+ is_eks_managed_node_group = false
+
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+
+ user_data_template_path = "${path.module}/templates/windows_custom.tpl"
+
+ pre_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+ bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+ post_bootstrap_user_data = <<-EOT
+ [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+ EOT
+}
diff --git a/examples/user_data/outputs.tf b/examples/user_data/outputs.tf
index d9f1f5cbf2..dd2c3407e1 100644
--- a/examples/user_data/outputs.tf
+++ b/examples/user_data/outputs.tf
@@ -39,3 +39,51 @@ output "eks_mng_bottlerocket_custom_template" {
description = "Base64 decoded user data rendered for the provided inputs"
value = base64decode(module.eks_mng_bottlerocket_custom_template.user_data)
}
+
+# Self managed node group - linux
+output "self_mng_linux_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_linux_no_op.user_data)
+}
+
+output "self_mng_linux_bootstrap" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_linux_bootstrap.user_data)
+}
+
+output "self_mng_linux_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_linux_custom_template.user_data)
+}
+
+# Self managed node group - bottlerocket
+output "self_mng_bottlerocket_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_bottlerocket_no_op.user_data)
+}
+
+output "self_mng_bottlerocket_bootstrap" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_bottlerocket_bootstrap.user_data)
+}
+
+output "self_mng_bottlerocket_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_bottlerocket_custom_template.user_data)
+}
+
+# Self managed node group - windows
+output "self_mng_windows_no_op" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_windows_no_op.user_data)
+}
+
+output "self_mng_windows_bootstrap" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_windows_bootstrap.user_data)
+}
+
+output "self_mng_windows_custom_template" {
+ description = "Base64 decoded user data rendered for the provided inputs"
+ value = base64decode(module.self_mng_windows_custom_template.user_data)
+}
diff --git a/examples/user_data/templates/linux_custom.tpl b/examples/user_data/templates/linux_custom.tpl
index c1c4955bc8..bfe21f117a 100644
--- a/examples/user_data/templates/linux_custom.tpl
+++ b/examples/user_data/templates/linux_custom.tpl
@@ -1,8 +1,10 @@
#!/bin/bash
set -ex
+${pre_bootstrap_user_data ~}
+
# Custom user data template provided for rendering
B64_CLUSTER_CA=${cluster_auth_base64}
API_SERVER_URL=${cluster_endpoint}
/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
-${post_bootstrap_user_data}
+${post_bootstrap_user_data ~}
diff --git a/examples/user_data/templates/windows_custom.tpl b/examples/user_data/templates/windows_custom.tpl
index c76c27a63e..ab64984135 100644
--- a/examples/user_data/templates/windows_custom.tpl
+++ b/examples/user_data/templates/windows_custom.tpl
@@ -1,9 +1,10 @@
+# Custom user data template provided for rendering
-${pre_bootstrap_user_data}
+${pre_bootstrap_user_data ~}
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
-${post_bootstrap_user_data}
+${post_bootstrap_user_data ~}
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 38dab46a36..7068e41c19 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -45,7 +45,7 @@ No modules.
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
| [create](#input\_create) | Determines whether to create user data or not | `bool` | `true` | no |
-| [is\_custom\_ami](#input\_is\_custom\_ami) | Determines whether a custom AMI is used or the default AMI in the case of EKS managed node groups | `bool` | `false` | no |
+| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group | `bool` | `true` | no |
| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
index f9e453ce35..f2c2854424 100644
--- a/modules/_user_data/main.tf
+++ b/modules/_user_data/main.tf
@@ -1,10 +1,10 @@
locals {
- int_linux_default_user_data = var.create && var.platform == "linux" && (var.is_custom_ami || var.user_data_template_path != "") ? base64encode(templatefile(
+ int_linux_default_user_data = var.create && var.platform == "linux" && (var.enable_bootstrap_user_data || var.user_data_template_path != "") ? base64encode(templatefile(
coalesce(var.user_data_template_path, "${path.module}/../../templates/linux_user_data.tpl"),
{
# https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
- is_custom_ami = var.is_custom_ami
+ enable_bootstrap_user_data = var.enable_bootstrap_user_data
# Required to bootstrap node
cluster_name = var.cluster_name
cluster_endpoint = var.cluster_endpoint
@@ -17,11 +17,11 @@ locals {
)) : ""
platform = {
bottlerocket = {
- user_data = var.create && var.platform == "bottlerocket" ? base64encode(templatefile(
+ user_data = var.create && var.platform == "bottlerocket" && var.enable_bootstrap_user_data ? base64encode(templatefile(
coalesce(var.user_data_template_path, "${path.module}/../../templates/bottlerocket_user_data.tpl"),
{
# https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
- is_custom_ami = var.is_custom_ami
+ enable_bootstrap_user_data = var.enable_bootstrap_user_data
# Required to bootstrap node
cluster_name = var.cluster_name
cluster_endpoint = var.cluster_endpoint
@@ -36,7 +36,7 @@ locals {
}
windows = {
- user_data = var.create && var.platform == "windows" ? base64encode(templatefile(
+ user_data = var.create && var.platform == "windows" && var.enable_bootstrap_user_data ? base64encode(templatefile(
coalesce(var.user_data_template_path, "${path.module}/../../templates/windows_user_data.tpl"),
{
# Required to bootstrap node
@@ -61,7 +61,7 @@ locals {
# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
data "cloudinit_config" "linux_eks_managed_node_group" {
- count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.is_custom_ami && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0
+ count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0
base64_encode = true
gzip = false
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
index 04a157c6df..ff4c2c7526 100644
--- a/modules/_user_data/variables.tf
+++ b/modules/_user_data/variables.tf
@@ -10,8 +10,8 @@ variable "platform" {
default = "linux"
}
-variable "is_custom_ami" {
- description = "Determines whether a custom AMI is used or the default AMI in the case of EKS managed node groups"
+variable "enable_bootstrap_user_data" {
+ description = "Determines whether the bootstrap configurations are populated within the user data template"
type = bool
default = false
}
diff --git a/templates/bottlerocket_user_data.tpl b/templates/bottlerocket_user_data.tpl
index dc10dcb41f..640c801438 100644
--- a/templates/bottlerocket_user_data.tpl
+++ b/templates/bottlerocket_user_data.tpl
@@ -1,4 +1,4 @@
-%{ if is_custom_ami ~}
+%{ if enable_bootstrap_user_data ~}
[settings.kubernetes]
"cluster-name" = "${cluster_name}"
"api-server" = "${cluster_endpoint}"
diff --git a/templates/linux_user_data.tpl b/templates/linux_user_data.tpl
index 63b06a1a42..d6f367984e 100644
--- a/templates/linux_user_data.tpl
+++ b/templates/linux_user_data.tpl
@@ -1,11 +1,11 @@
-%{ if is_custom_ami ~}
+%{ if enable_bootstrap_user_data ~}
#!/bin/bash
set -e
%{ endif ~}
${pre_bootstrap_user_data ~}
-%{ if is_custom_ami ~}
+%{ if enable_bootstrap_user_data ~}
B64_CLUSTER_CA=${cluster_auth_base64}
API_SERVER_URL=${cluster_endpoint}
/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
-${post_bootstrap_user_data}
+${post_bootstrap_user_data ~}
%{ endif ~}
diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl
index c76c27a63e..47b2feca70 100644
--- a/templates/windows_user_data.tpl
+++ b/templates/windows_user_data.tpl
@@ -1,9 +1,9 @@
-${pre_bootstrap_user_data}
+${pre_bootstrap_user_data ~}
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
-${post_bootstrap_user_data}
+${post_bootstrap_user_data ~}
From d6e27b5ba67c0a0cb556acb6e12140dc71418055 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 3 Dec 2021 11:04:00 -0500
Subject: [PATCH 46/83] refactor: incorporate new user data internal module and
provide documentation on its usage and patterns
---
.github/images/user_data.svg | 1 +
examples/complete/main.tf | 20 +++--
examples/eks_managed_node_group/main.tf | 12 +--
examples/self_managed_node_group/main.tf | 1 -
modules/_user_data/README.md | 75 ++++++++++++++++++-
modules/eks-managed-node-group/README.md | 14 ++--
modules/eks-managed-node-group/main.tf | 79 ++++----------------
modules/eks-managed-node-group/variables.tf | 22 +++---
modules/self-managed-node-group/README.md | 13 ++--
modules/self-managed-node-group/main.tf | 72 +++++-------------
modules/self-managed-node-group/variables.tf | 22 ++----
node_groups.tf | 31 ++++----
12 files changed, 172 insertions(+), 190 deletions(-)
create mode 100644 .github/images/user_data.svg
diff --git a/.github/images/user_data.svg b/.github/images/user_data.svg
new file mode 100644
index 0000000000..daa2ea33f3
--- /dev/null
+++ b/.github/images/user_data.svg
@@ -0,0 +1 @@
+
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 6817b3868a..b1ac281531 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -59,6 +59,11 @@ module "eks" {
one = {
name = "spot-1"
+ public_ip = true
+
+ max_size = 5
+ desired_size = 2
+
use_mixed_instances_policy = true
mixed_instances_policy = {
instances_distribution = {
@@ -70,25 +75,28 @@ module "eks" {
override = [
{
instance_type = "m5.large"
- weighted_capacity = "2"
+ weighted_capacity = "1"
},
{
instance_type = "m6i.large"
- weighted_capacity = "1"
+ weighted_capacity = "2"
},
]
}
- max_size = 5
- desired_size = 5
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
post_bootstrap_user_data = <<-EOT
cd /tmp
sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
sudo systemctl enable amazon-ssm-agent
sudo systemctl start amazon-ssm-agent
EOT
- public_ip = true
}
}
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 3464532456..21e74ee676 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -70,6 +70,8 @@ module "eks" {
launch_template_name = "bottlerocket-custom"
update_default_version = true
+ enable_bootstrap_user_data = true
+
bootstrap_extra_args = <<-EOT
[settings.kernel]
lockdown = "integrity"
@@ -98,10 +100,11 @@ module "eks" {
# Current default AMI used by managed node groups - pseudo "custom"
ami_id = "ami-0caf35bc73450c396"
+
      # This will ensure the bootstrap user data is used to join the node
      # By default, EKS managed node groups will not append the bootstrap script;
      # this adds it back in if it's an EKS optimized AMI derivative
- ami_is_eks_optimized = true
+ enable_bootstrap_user_data = true
}
# Complete
@@ -115,12 +118,11 @@ module "eks" {
max_size = 7
desired_size = 1
- ami_id = "ami-0caf35bc73450c396"
- ami_is_eks_optimized = true
- bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+ ami_id = "ami-0caf35bc73450c396"
+ enable_bootstrap_user_data = true
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
pre_bootstrap_user_data = <<-EOT
- #!/bin/bash set -ex
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
EOT
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 54fd3303d4..6bd740448b 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -93,7 +93,6 @@ module "eks" {
bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
pre_bootstrap_user_data = <<-EOT
- #!/bin/bash set -ex
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
EOT
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 7068e41c19..f7502ae095 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -1,6 +1,77 @@
# Internal User Data Module
-Configuration in this directory configures the necessary user data for launching nodes
+Configuration in this directory renders the appropriate user data for the given inputs. User data can be utilized in a number of different ways, and this internal module is designed to make that flexibility possible without over-complicating or duplicating code.
+
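+As a quick orientation, a minimal invocation of this internal module might look like the following sketch; all values shown are illustrative:
+
+```hcl
+module "user_data" {
+  source = "../../modules/_user_data"
+
+  platform                   = "linux"
+  is_eks_managed_node_group  = false # i.e. - a self managed node group
+  enable_bootstrap_user_data = true  # render the module supplied bootstrap template
+
+  # Hypothetical cluster values - normally wired in from the cluster resource/module
+  cluster_name        = "example"
+  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com"
+  cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t"
+}
+```
+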
+## Combinations
+
+At a high level, we have two methods for launching nodes within the EKS module:
+
+1. EKS managed node group
+2. Self managed node group
+
+### EKS Managed Node Group
+
+Within the EKS managed node group, users have two methods for populating user data.
+
+ℹ️ Note: When using Bottlerocket as the desired platform, the user data is TOML and all configurations are merged into the one file supplied as user data. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid, since the Bottlerocket OS controls when the various settings are applied. If you wish to supply additional configuration settings when using Bottlerocket, supply them via the `bootstrap_extra_args` variable. For the Linux platform, `bootstrap_extra_args` are settings supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14), such as kubelet extra args.
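+
+For example, a brief sketch of supplying additional Bottlerocket settings via `bootstrap_extra_args` (the settings shown are illustrative):
+
+```hcl
+bootstrap_extra_args = <<-EOT
+  # additional TOML settings merged into the rendered user data
+  [settings.kernel]
+  lockdown = "integrity"
+EOT
+```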
+
+1. If the EKS managed node group does not use a custom AMI, then the EKS managed node group is responsible for the AMI used and users can elect to supply additional user data that is prepended before the EKS managed node group bootstrap user data. You can read more about this process in the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
+
+ - Users can use the following two variables to supplement this process:
+
+ ```hcl
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ ```
+
+2. If the EKS managed node group utilizes a custom AMI, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary bootstrap configuration via user data to ensure that the node is configured to register with the cluster upon launch. There are two routes users can take to facilitate this bootstrapping process:
+    - If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add pre- and post-bootstrap user data as well as additional arguments that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
+ - Users can use the following variables to facilitate this process:
+ ```hcl
+ enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+    - If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data supplied to the node upon launch, users can supply their own user data template that will be rendered instead of the module supplied template. Note: only the variables that are supplied to the `templatefile()` call for the respective platform/OS are available for use in the supplied template; otherwise, users will need to pre-render/pre-populate the template before supplying it to the module for rendering as user data.
+      - Users can use the following variables to facilitate this process:
+ ```hcl
+ user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+
+### Self Managed Node Group
+
+Within the self managed node group, the options presented to users are very similar to the second option listed above for EKS managed node groups. Because self managed node groups require users to provide the bootstrap user data, there is no concept of appending to user data that AWS provides; instead, users can either use the user data template provided for their platform/OS by the module or provide their own user data template for the module to render.
+
+- If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add pre- and post-bootstrap user data as well as additional arguments that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
+ - Users can use the following variables to facilitate this process:
+ ```hcl
+ enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+- If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data supplied to the node upon launch, users can supply their own user data template that will be rendered instead of the module supplied template. Note: only the variables that are supplied to the `templatefile()` call for the respective platform/OS are available for use in the supplied template; otherwise, users will need to pre-render/pre-populate the template before supplying it to the module for rendering as user data.
+  - Users can use the following variables to facilitate this process:
+ ```hcl
+ user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
+ pre_bootstrap_user_data = "..."
+ bootstrap_extra_args = "..."
+ post_bootstrap_user_data = "..."
+ ```
+
+### Logic Diagram
+
+The rough flow of logic encapsulated within the `_user_data` internal module is represented in the following diagram, which highlights the various ways in which user data can be populated.
+
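+A minimal sketch, condensing the module internals, of how the template to render is chosen; a user supplied template path wins over the module default for the platform:
+
+```hcl
+locals {
+  # Illustrative only - see main.tf for the full conditional logic
+  template_path = coalesce(
+    var.user_data_template_path,                                   # user supplied template, if any
+    "${path.module}/../../templates/${var.platform}_user_data.tpl" # module default per platform
+  )
+}
+```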
+
## Usage
@@ -10,7 +81,7 @@ To run this example you need to execute:
$ terraform init
$ terraform plan
$ terraform apply
 ```
## Requirements
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index ce0d754a20..6e23482db4 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -60,11 +60,12 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.64 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
## Modules
-No modules.
+| Name | Source | Version |
+|------|--------|---------|
+| [user\_data](#module\_user\_data) | ../_user_data | n/a |
## Resources
@@ -78,22 +79,20 @@ No modules.
| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [cloudinit_config.eks_optimized_ami_user_data](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance. If not supplied, EKS will use its own default image | `string` | `""` | no |
-| [ami\_is\_eks\_optimized](#input\_ami\_is\_eks\_optimized) | Determines whether the AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not; if `true` then the module will add the boostrap user data | `bool` | `true` | no |
| [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version | `string` | `null` | no |
| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no |
| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no |
| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
| [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no |
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `null` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `null` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plain security group ID | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
@@ -103,7 +102,6 @@ No modules.
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
-| [custom\_user\_data](#input\_custom\_user\_data) | Base64-encoded user data used; should be used when `ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster | `string` | `""` | no |
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
| [desired\_size](#input\_desired\_size) | Desired number of worker nodes | `number` | `1` | no |
@@ -112,6 +110,7 @@ No modules.
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
+| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
| [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no |
@@ -155,6 +154,7 @@ No modules.
| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `{}` | no |
| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `bool` | `true` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 7cc9825745..a2a5aa7688 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -4,74 +4,21 @@ data "aws_partition" "current" {}
# User Data
################################################################################
-locals {
- platform = {
- bottlerocket = {
- user_data = var.custom_user_data != "" ? var.custom_user_data : base64encode(templatefile(
- "${path.module}/../../templates/bottlerocket_user_data.tpl",
- {
- ami_id = var.ami_id
- # Required to bootstrap node
- cluster_name = var.cluster_name
- # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- # Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- }
- ))
- }
- linux = {
- user_data = var.custom_user_data != "" ? var.custom_user_data : try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, "")
- }
- # Not supported on EKS managed node groups
- # windows = {}
- }
-}
+module "user_data" {
+ source = "../_user_data"
-# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
-# An important note is that user data must in MIME multi-part archive format,
-# as by default, EKS will merge the bootstrapping command required for nodes to join the
-# cluster with your user data. If you use a custom AMI in your launch template,
-# this merging will NOT happen and you are responsible for nodes joining the cluster.
-# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
+ create = var.create
+ platform = var.platform
-data "cloudinit_config" "eks_optimized_ami_user_data" {
- count = var.create && var.platform == "linux" && ((local.use_custom_launch_template && var.pre_bootstrap_user_data != "") || (var.ami_id != "" && var.ami_is_eks_optimized)) ? 1 : 0
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
- base64_encode = true
- gzip = false
- boundary = "//"
-
- # Prepend to existing user data suppled by AWS EKS
- dynamic "part" {
- for_each = var.pre_bootstrap_user_data != "" ? [1] : []
- content {
- content_type = "text/x-shellscript"
- content = var.pre_bootstrap_user_data
- }
- }
-
- # Supply all of bootstrap user data due to custom AMI
- dynamic "part" {
- for_each = var.ami_id != "" && var.ami_is_eks_optimized ? [1] : []
- content {
- content_type = "text/x-shellscript"
- content = templatefile("${path.module}/../../templates/linux_user_data.tpl",
- {
- ami_id = var.ami_id
- # Required to bootstrap node
- cluster_name = var.cluster_name
- # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- # Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- post_bootstrap_user_data = var.post_bootstrap_user_data
- }
- )
- }
- }
+ enable_bootstrap_user_data = var.enable_bootstrap_user_data
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ bootstrap_extra_args = var.bootstrap_extra_args
+ user_data_template_path = var.user_data_template_path
}
################################################################################
@@ -95,7 +42,7 @@ resource "aws_launch_template" "this" {
# # Set on node group instead
# instance_type = var.launch_template_instance_type
key_name = var.key_name
- user_data = local.platform[var.platform].user_data
+ user_data = module.user_data.user_data
vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index b0c6cfff51..7bc5913eb1 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -20,28 +20,22 @@ variable "platform" {
# User Data
################################################################################
-variable "custom_user_data" {
- description = "Base64-encoded user data used; should be used when `ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster"
- type = string
- default = ""
-}
-
-variable "ami_is_eks_optimized" {
- description = "Determines whether the AMI ID provided (`ami_id`) is an EKS optimized AMI derivative or not; if `true` then the module will add the boostrap user data"
+variable "enable_bootstrap_user_data" {
+ description = "Determines whether the bootstrap configurations are populated within the user data template"
type = bool
- default = true
+ default = false
}
variable "cluster_endpoint" {
description = "Endpoint of associated EKS cluster"
type = string
- default = null
+ default = ""
}
variable "cluster_auth_base64" {
description = "Base64 encoded CA of associated EKS cluster"
type = string
- default = null
+ default = ""
}
variable "pre_bootstrap_user_data" {
@@ -62,6 +56,12 @@ variable "bootstrap_extra_args" {
default = ""
}
+variable "user_data_template_path" {
+ description = "Path to a local, custom user data template file to use when rendering user data"
+ type = string
+ default = ""
+}
+
################################################################################
# Launch template
################################################################################
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 04c643fe7e..154b915a76 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -26,11 +26,12 @@ $ terraform apply
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.64 |
-| [cloudinit](#provider\_cloudinit) | >= 2.0 |
## Modules
-No modules.
+| Name | Source | Version |
+|------|--------|---------|
+| [user\_data](#module\_user\_data) | ../_user_data | n/a |
## Resources
@@ -47,7 +48,6 @@ No modules.
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [cloudinit_config.eks_optimized_ami_user_data](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
@@ -59,8 +59,8 @@ No modules.
| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
-| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `null` | no |
-| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `null` | no |
+| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster that the node group will be associated with | `string` | `null` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no |
@@ -71,7 +71,6 @@ No modules.
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
-| [custom\_user\_data](#input\_custom\_user\_data) | Base64-encoded user data used; should be used when `ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster | `string` | `""` | no |
| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
@@ -81,7 +80,6 @@ No modules.
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
-| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether custom bootstrap user data template is used to boostrap node. Enabling this assumes the AMI is an AWS EKS Optimized AMI derivative | `bool` | `true` | no |
| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
| [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `null` | no |
| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
@@ -141,6 +139,7 @@ No modules.
| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `string` | `null` | no |
| [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
| [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior. | `string` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 321efc233c..1b21a9d83d 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -16,60 +16,22 @@ data "aws_ami" "eks_default" {
# User Data
################################################################################
-locals {
- platform = {
- bottlerocket = {
- content_type = "application/toml"
- user_data = var.platform == "bottlerocket" ? base64encode(templatefile("${path.module}/../../templates/${var.platform}_user_data.tpl",
- {
- ami_id = var.ami_id
- # Required to bootstrap node
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- # Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- }
- )) : ""
- }
- linux = {
- content_type = "text/x-shellscript"
- }
- windows = {
- content_type = "text/x-shellscript"
- }
- }
-}
-
-data "cloudinit_config" "eks_optimized_ami_user_data" {
- count = var.create && var.enable_bootstrap_user_data && var.platform != "bottlerocket" ? 1 : 0
-
- gzip = false
- boundary = "//"
-
- dynamic "part" {
- for_each = var.pre_bootstrap_user_data != "" ? [1] : []
- content {
- content_type = local.platform[var.platform].content_type
- content = var.pre_bootstrap_user_data
- }
- }
-
- part {
- content_type = local.platform[var.platform].content_type
- content = templatefile("${path.module}/../../templates/${var.platform}_user_data.tpl",
- {
- ami_id = "JustNeedsToBeSomethingToEnsureUserDataIsPopulated"
- # Required to bootstrap node
- cluster_name = var.cluster_name
- cluster_endpoint = var.cluster_endpoint
- cluster_auth_base64 = var.cluster_auth_base64
- # Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- post_bootstrap_user_data = var.post_bootstrap_user_data
- }
- )
- }
+module "user_data" {
+ source = "../_user_data"
+
+ create = var.create
+ platform = var.platform
+ is_eks_managed_node_group = false
+
+ cluster_name = var.cluster_name
+ cluster_endpoint = var.cluster_endpoint
+ cluster_auth_base64 = var.cluster_auth_base64
+
+ enable_bootstrap_user_data = true
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
+ bootstrap_extra_args = var.bootstrap_extra_args
+ user_data_template_path = var.user_data_template_path
}
################################################################################
@@ -91,7 +53,7 @@ resource "aws_launch_template" "this" {
image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id)
instance_type = var.instance_type
key_name = var.key_name
- user_data = var.platform == "bottlerocket" ? local.platform.bottlerocket.user_data : try(data.cloudinit_config.eks_optimized_ami_user_data[0].rendered, var.custom_user_data)
+ user_data = module.user_data.user_data
vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index cfaae9f089..564f13b517 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -20,28 +20,16 @@ variable "platform" {
# User Data
################################################################################
-variable "enable_bootstrap_user_data" {
- description = "Determines whether custom bootstrap user data template is used to boostrap node. Enabling this assumes the AMI is an AWS EKS Optimized AMI derivative"
- type = bool
- default = true
-}
-
-variable "custom_user_data" {
- description = "Base64-encoded user data used; should be used when `ami_is_eks_optimized` = `false` to boostrap and join instances to the cluster"
- type = string
- default = ""
-}
-
variable "cluster_endpoint" {
description = "Endpoint of associated EKS cluster"
type = string
- default = null
+ default = ""
}
variable "cluster_auth_base64" {
description = "Base64 encoded CA of associated EKS cluster"
type = string
- default = null
+ default = ""
}
variable "pre_bootstrap_user_data" {
@@ -62,6 +50,12 @@ variable "bootstrap_extra_args" {
default = ""
}
+variable "user_data_template_path" {
+ description = "Path to a local, custom user data template file to use when rendering user data"
+ type = string
+ default = ""
+}
+
################################################################################
# Autoscaling group
################################################################################
diff --git a/node_groups.tf b/node_groups.tf
index eb36396f9d..fe50f2cfb2 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -207,14 +207,14 @@ module "eks_managed_node_group" {
timeouts = try(each.value.timeouts, var.eks_managed_node_group_defaults.timeouts, {})
# User data
- platform = try(each.value.platform, var.eks_managed_node_group_defaults.platform, "linux")
- custom_user_data = try(each.value.custom_user_data, var.eks_managed_node_group_defaults.custom_user_data, "")
- ami_is_eks_optimized = try(each.value.ami_is_eks_optimized, var.eks_managed_node_group_defaults.ami_is_eks_optimized, true)
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.eks_managed_node_group_defaults.cluster_endpoint, null)
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.eks_managed_node_group_defaults.cluster_auth_base64, null)
- pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "")
- post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.eks_managed_node_group_defaults.post_bootstrap_user_data, "")
- bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.eks_managed_node_group_defaults.bootstrap_extra_args, "")
+ platform = try(each.value.platform, var.eks_managed_node_group_defaults.platform, "linux")
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.eks_managed_node_group_defaults.cluster_endpoint, "")
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.eks_managed_node_group_defaults.cluster_auth_base64, "")
+ enable_bootstrap_user_data = try(each.value.enable_bootstrap_user_data, var.eks_managed_node_group_defaults.enable_bootstrap_user_data, false)
+ pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "")
+ post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.eks_managed_node_group_defaults.post_bootstrap_user_data, "")
+ bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.eks_managed_node_group_defaults.bootstrap_extra_args, "")
+ user_data_template_path = try(each.value.user_data_template_path, var.eks_managed_node_group_defaults.user_data_template_path, "")
# Launch Template
create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, false)
@@ -324,14 +324,13 @@ module "self_managed_node_group" {
delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null)
# User data
- platform = try(each.value.platform, var.eks_managed_node_group_defaults.platform, "linux")
- enable_bootstrap_user_data = try(each.value.enable_bootstrap_user_data, var.self_managed_node_group_defaults.enable_bootstrap_user_data, true)
- custom_user_data = try(each.value.custom_user_data, var.self_managed_node_group_defaults.custom_user_data, "")
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.self_managed_node_group_defaults.cluster_endpoint, null)
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.self_managed_node_group_defaults.cluster_auth_base64, null)
- pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_managed_node_group_defaults.pre_bootstrap_user_data, "")
- post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_managed_node_group_defaults.post_bootstrap_user_data, "")
- bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "")
+ platform = try(each.value.platform, var.self_managed_node_group_defaults.platform, "linux")
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.self_managed_node_group_defaults.cluster_endpoint, "")
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.self_managed_node_group_defaults.cluster_auth_base64, "")
+ pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_managed_node_group_defaults.pre_bootstrap_user_data, "")
+ post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_managed_node_group_defaults.post_bootstrap_user_data, "")
+ bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "")
+ user_data_template_path = try(each.value.user_data_template_path, var.self_managed_node_group_defaults.user_data_template_path, "")
# Launch Template
create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
From c9b937efe9afc4e3fd0ea06dd81067b76f91bdbb Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 3 Dec 2021 11:36:09 -0500
Subject: [PATCH 47/83] chore: update documentation, 2nd pass
---
.github/images/user_data.svg | 2 +-
.pre-commit-config.yaml | 4 +
examples/complete/README.md | 3 +
examples/complete/main.tf | 73 ++++-
examples/eks_managed_node_group/README.md | 1 +
examples/eks_managed_node_group/main.tf | 68 ++--
.../{fargate => fargate_profile}/README.md | 0
examples/{fargate => fargate_profile}/main.tf | 4 +-
.../{fargate => fargate_profile}/outputs.tf | 0
.../{fargate => fargate_profile}/variables.tf | 0
.../{fargate => fargate_profile}/versions.tf | 0
examples/instance_refresh/main.tf | 292 ------------------
examples/irsa/irsa.tf | 114 -------
examples/irsa/main.tf | 93 ------
examples/irsa/outputs.tf | 167 ----------
examples/irsa/variables.tf | 0
.../README.md | 40 ++-
examples/irsa_autoscale_refresh/charts.tf | 292 ++++++++++++++++++
examples/irsa_autoscale_refresh/main.tf | 165 ++++++++++
.../outputs.tf | 0
.../variables.tf | 0
.../versions.tf | 4 +
examples/self_managed_node_group/README.md | 1 +
examples/self_managed_node_group/main.tf | 32 +-
modules/_user_data/README.md | 25 +-
modules/_user_data/main.tf | 2 +-
modules/eks-managed-node-group/README.md | 4 +-
modules/eks-managed-node-group/variables.tf | 4 +-
modules/fargate-profile/README.md | 2 +-
modules/fargate-profile/main.tf | 4 +-
modules/fargate-profile/variables.tf | 2 +-
modules/self-managed-node-group/README.md | 8 +-
modules/self-managed-node-group/main.tf | 3 +
modules/self-managed-node-group/variables.tf | 20 +-
node_groups.tf | 12 +-
35 files changed, 698 insertions(+), 743 deletions(-)
rename examples/{fargate => fargate_profile}/README.md (100%)
rename examples/{fargate => fargate_profile}/main.tf (97%)
rename examples/{fargate => fargate_profile}/outputs.tf (100%)
rename examples/{fargate => fargate_profile}/variables.tf (100%)
rename examples/{fargate => fargate_profile}/versions.tf (100%)
delete mode 100644 examples/instance_refresh/main.tf
delete mode 100644 examples/irsa/irsa.tf
delete mode 100644 examples/irsa/main.tf
delete mode 100644 examples/irsa/outputs.tf
delete mode 100644 examples/irsa/variables.tf
rename examples/{instance_refresh => irsa_autoscale_refresh}/README.md (73%)
create mode 100644 examples/irsa_autoscale_refresh/charts.tf
create mode 100644 examples/irsa_autoscale_refresh/main.tf
rename examples/{instance_refresh => irsa_autoscale_refresh}/outputs.tf (100%)
rename examples/{instance_refresh => irsa_autoscale_refresh}/variables.tf (100%)
rename examples/{instance_refresh => irsa_autoscale_refresh}/versions.tf (88%)
diff --git a/.github/images/user_data.svg b/.github/images/user_data.svg
index daa2ea33f3..a36b8051e4 100644
--- a/.github/images/user_data.svg
+++ b/.github/images/user_data.svg
@@ -1 +1 @@
-
+
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index eaed32796e..75d403e724 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,10 +1,14 @@
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
+<<<<<<< HEAD
<<<<<<< HEAD
rev: v1.57.0
=======
rev: v1.58.0
>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
+=======
+ rev: v1.59.0
+>>>>>>> fb3eb35 (chore: remove karpenter, back to instance refresh and node termination handler)
hooks:
- id: terraform_fmt
- id: terraform_validate
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 10937ebdd3..daf25524a1 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -67,6 +67,9 @@ Note that this example may create resources which cost money. Run `terraform des
| [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | n/a |
>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
| [eks](#module\_eks) | ../.. | n/a |
+| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
+| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a |
+| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index b1ac281531..4241e38d01 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -142,7 +142,7 @@ module "eks" {
# Fargate Profile(s)
fargate_profiles = {
default = {
- fargate_profile_name = "default"
+ name = "default"
selectors = [
{
namespace = "kube-system"
@@ -169,6 +169,56 @@ module "eks" {
tags = local.tags
}
+################################################################################
+# Sub-Module Usage on Existing/Separate Cluster
+################################################################################
+
+module "eks_managed_node_group" {
+ source = "../../modules/eks-managed-node-group"
+
+ name = "separate-eks-mng"
+ cluster_name = module.eks.cluster_id
+ cluster_version = local.cluster_version
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ tags = merge(local.tags, { Separate = "eks-managed-node-group" })
+}
+
+module "self_managed_node_group" {
+ source = "../../modules/self-managed-node-group"
+
+ name = "separate-self-mng"
+ cluster_name = module.eks.cluster_id
+ cluster_version = local.cluster_version
+ cluster_endpoint = module.eks.cluster_endpoint
+ cluster_auth_base64 = module.eks.cluster_certificate_authority_data
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ create_launch_template = true
+ launch_template_name = "separate-self-mng"
+ instance_type = "m5.large"
+
+ tags = merge(local.tags, { Separate = "self-managed-node-group" })
+}
+
+module "fargate_profile" {
+ source = "../../modules/fargate-profile"
+
+ name = "separate-fargate-profile"
+ cluster_name = module.eks.cluster_id
+
+ subnet_ids = module.vpc.private_subnets
+ selectors = [{
+ namespace = "kube-system"
+ }]
+
+ tags = merge(local.tags, { Separate = "fargate-profile" })
+}
+
################################################################################
# Disabled creation
################################################################################
@@ -233,6 +283,27 @@ locals {
}
}]
})
+
+ # we have to combine the configmap created by the eks module with the externally created node group/profile sub-modules
+ # aws_auth_configmap = <<-EOT
+ # ${chomp(module.eks.aws_auth_configmap_yaml)}
+ # - rolearn: ${module.eks_managed_node_group.iam_role_arn}
+ # username: system:node:{{EC2PrivateDNSName}}
+ # groups:
+ # - system:bootstrappers
+ # - system:nodes
+ # - rolearn: ${module.self_managed_node_group.iam_role_arn}
+ # username: system:node:{{EC2PrivateDNSName}}
+ # groups:
+ # - system:bootstrappers
+ # - system:nodes
+ # - rolearn: ${module.fargate_profile.fargate_profile_arn}
+ # username: system:node:{{SessionName}}
+ # groups:
+ # - system:bootstrappers
+ # - system:nodes
+ # - system:node-proxier
+ # EOT
}
resource "null_resource" "patch" {
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index e89c61f8bb..a0171a87fd 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -47,6 +47,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 21e74ee676..9654603078 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -14,6 +14,8 @@ locals {
}
}
+data "aws_caller_identity" "current" {}
+
################################################################################
# EKS Module
################################################################################
@@ -21,14 +23,32 @@ locals {
module "eks" {
source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ cluster_encryption_config = [
+ {
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }
+ ]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
+ enable_irsa = true
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
@@ -43,11 +63,13 @@ module "eks" {
# Default node group - as provided by AWS EKS using Bottlerocket
bottlerocket_default = {
ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
}
# Adds to the AWS provided user data
bottlerocket_add = {
ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
create_launch_template = true
launch_template_name = "bottlerocket-custom"
@@ -55,9 +77,10 @@ module "eks" {
# this will get added to what AWS provides
bootstrap_extra_args = <<-EOT
- [settings.kernel]
- lockdown = "integrity"
- EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
}
# Custom AMI, using module provided bootstrap data
@@ -70,20 +93,22 @@ module "eks" {
launch_template_name = "bottlerocket-custom"
update_default_version = true
+ # use module user data template to bootstrap
enable_bootstrap_user_data = true
-
+ # this will get added to the template
bootstrap_extra_args = <<-EOT
- [settings.kernel]
- lockdown = "integrity"
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
- [settings.kubernetes.node-labels]
- "label1" = "foo"
- "label2" = "bar"
+ [settings.kubernetes.node-labels]
+ "label1" = "foo"
+ "label2" = "bar"
- [settings.kubernetes.node-taints]
- "dedicated" = "experimental:PreferNoSchedule"
- "special" = "true:NoSchedule"
- EOT
+ [settings.kubernetes.node-taints]
+ "dedicated" = "experimental:PreferNoSchedule"
+ "special" = "true:NoSchedule"
+ EOT
}
# Use existing/external launch template
@@ -201,7 +226,6 @@ module "eks" {
security_group_use_name_prefix = false
security_group_description = "EKS managed node group complete example security group"
security_group_rules = {
-
phoneOut = {
description = "Hello CloudFlare"
protocol = "udp"
@@ -339,7 +363,13 @@ resource "aws_security_group" "additional" {
tags = local.tags
}
-data "aws_caller_identity" "current" {}
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
resource "aws_kms_key" "ebs" {
description = "Customer managed key to encrypt EKS managed node group volumes"
diff --git a/examples/fargate/README.md b/examples/fargate_profile/README.md
similarity index 100%
rename from examples/fargate/README.md
rename to examples/fargate_profile/README.md
diff --git a/examples/fargate/main.tf b/examples/fargate_profile/main.tf
similarity index 97%
rename from examples/fargate/main.tf
rename to examples/fargate_profile/main.tf
index b490788d6e..f2df652486 100644
--- a/examples/fargate/main.tf
+++ b/examples/fargate_profile/main.tf
@@ -69,7 +69,7 @@ module "eks" {
fargate_profiles = {
default = {
- fargate_profile_name = "default"
+ name = "default"
selectors = [
{
namespace = "kube-system"
@@ -96,7 +96,7 @@ module "eks" {
}
secondary = {
- fargate_profile_name = "secondary"
+ name = "secondary"
selectors = [
{
namespace = "default"
diff --git a/examples/fargate/outputs.tf b/examples/fargate_profile/outputs.tf
similarity index 100%
rename from examples/fargate/outputs.tf
rename to examples/fargate_profile/outputs.tf
diff --git a/examples/fargate/variables.tf b/examples/fargate_profile/variables.tf
similarity index 100%
rename from examples/fargate/variables.tf
rename to examples/fargate_profile/variables.tf
diff --git a/examples/fargate/versions.tf b/examples/fargate_profile/versions.tf
similarity index 100%
rename from examples/fargate/versions.tf
rename to examples/fargate_profile/versions.tf
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
deleted file mode 100644
index c6b4a0c474..0000000000
--- a/examples/instance_refresh/main.tf
+++ /dev/null
@@ -1,292 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_version = "1.21"
- region = "eu-west-1"
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup
-
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-provider "helm" {
- kubernetes {
- host = module.eks.cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_iam_policy_document" "aws_node_termination_handler" {
- statement {
- effect = "Allow"
- actions = [
- "ec2:DescribeInstances",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeTags",
- ]
- resources = [
- "*",
- ]
- }
- statement {
- effect = "Allow"
- actions = [
- "autoscaling:CompleteLifecycleAction",
- ]
- resources = [for k, v in module.eks.self_managed_node_groups : v.autoscaling_group_arn]
- }
- statement {
- effect = "Allow"
- actions = [
- "sqs:DeleteMessage",
- "sqs:ReceiveMessage"
- ]
- resources = [
- module.aws_node_termination_handler_sqs.sqs_queue_arn
- ]
- }
-}
-
-resource "aws_iam_policy" "aws_node_termination_handler" {
- name = "${local.name}-aws-node-termination-handler"
- policy = data.aws_iam_policy_document.aws_node_termination_handler.json
-}
-
-data "aws_iam_policy_document" "aws_node_termination_handler_events" {
- statement {
- effect = "Allow"
- principals {
- type = "Service"
- identifiers = [
- "events.amazonaws.com",
- "sqs.amazonaws.com",
- ]
- }
- actions = [
- "sqs:SendMessage",
- ]
- resources = [
- "arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.name}",
- ]
- }
-}
-
-module "aws_node_termination_handler_sqs" {
- source = "terraform-aws-modules/sqs/aws"
- version = "~> 3.0"
- name = local.name
- message_retention_seconds = 300
- policy = data.aws_iam_policy_document.aws_node_termination_handler_events.json
-}
-
-resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
- name = "${local.name}-asg-termination"
- description = "Node termination event rule"
- event_pattern = jsonencode(
- {
- "source" : [
- "aws.autoscaling"
- ],
- "detail-type" : [
- "EC2 Instance-terminate Lifecycle Action"
- ]
- "resources" : [for k, v in module.eks.self_managed_node_groups : v.autoscaling_group_arn]
- }
- )
-}
-
-resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
- target_id = "${local.name}-asg-termination"
- rule = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
- arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
- name = "${local.name}-spot-termination"
- description = "Node termination event rule"
- event_pattern = jsonencode(
- {
- "source" : [
- "aws.ec2"
- ],
- "detail-type" : [
- "EC2 Spot Instance Interruption Warning"
- ]
- "resources" : [for k, v in module.eks.self_managed_node_groups : v.autoscaling_group_arn]
- }
- )
-}
-
-resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
- target_id = "${local.name}-spot-termination"
- rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
- arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-module "aws_node_termination_handler_role" {
- source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "~> 4.0"
- create_role = true
- role_description = "IRSA role for ANTH, cluster ${local.name}"
- role_name_prefix = local.name
- provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
- role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
- oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
-}
-
-resource "helm_release" "aws_node_termination_handler" {
- depends_on = [
- module.eks
- ]
-
- name = "aws-node-termination-handler"
- namespace = "kube-system"
- repository = "https://aws.github.io/eks-charts"
- chart = "aws-node-termination-handler"
- version = "0.15.0"
- create_namespace = true
-
- set {
- name = "awsRegion"
- value = local.region
- }
- set {
- name = "serviceAccount.name"
- value = "aws-node-termination-handler"
- }
- set {
- name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
- value = module.aws_node_termination_handler_role.iam_role_arn
- type = "string"
- }
- set {
- name = "enableSqsTerminationDraining"
- value = "true"
- }
- set {
- name = "enableSpotInterruptionDraining"
- value = "true"
- }
- set {
- name = "queueURL"
- value = module.aws_node_termination_handler_sqs.sqs_queue_id
- }
- set {
- name = "logLevel"
- value = "debug"
- }
-}
-
-# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
-# ensures that node termination does not require the lifecycle action to be completed,
-# and thus allows the ASG to be destroyed cleanly.
-resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
- for_each = module.eks.self_managed_node_groups
- name = "aws-node-termination-handler"
- autoscaling_group_name = each.value.autoscaling_group_id
- lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
- heartbeat_timeout = 300
- default_result = "CONTINUE"
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- enable_irsa = true
-
- self_managed_node_groups = {
- one = {
- name = "refresh"
- max_size = 2
- desired_size = 2
-
- instance_refresh_enabled = true
- instance_refresh_instance_warmup = 60
- public_ip = true
- metadata_http_put_response_hop_limit = 3
- update_default_version = true
- instance_refresh_triggers = ["tag"]
- tags = [
- {
- key = "aws-node-termination-handler/managed"
- value = ""
- propagate_at_launch = true
- },
- {
- key = "foo"
- value = "buzz"
- propagate_at_launch = true
- }
- ]
- }
- }
-
- tags = local.tags
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
-
- azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- enable_flow_log = true
- create_flow_log_cloudwatch_iam_role = true
- create_flow_log_cloudwatch_log_group = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
- }
-
- tags = local.tags
-}
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
deleted file mode 100644
index 810c871201..0000000000
--- a/examples/irsa/irsa.tf
+++ /dev/null
@@ -1,114 +0,0 @@
-data "aws_eks_cluster_auth" "cluster" {
- name = module.eks.cluster_id
-}
-
-locals {
- k8s_service_account_namespace = "kube-system"
- k8s_service_account_name = "cluster-autoscaler-aws"
-}
-
-provider "helm" {
- kubernetes {
- host = module.eks.cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
-
-resource "helm_release" "cluster_autoscaler" {
- depends_on = [
- module.eks
- ]
-
- name = "cluster-autoscaler"
- namespace = local.k8s_service_account_namespace
- repository = "https://kubernetes.github.io/autoscaler"
- chart = "cluster-autoscaler"
- version = "9.10.7"
- create_namespace = false
-
- set {
- name = "awsRegion"
- value = local.region
- }
- set {
- name = "rbac.serviceAccount.name"
- value = local.k8s_service_account_name
- }
- set {
- name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
- value = module.iam_assumable_role_admin.iam_role_arn
- type = "string"
- }
- set {
- name = "autoDiscovery.clusterName"
- value = local.name
- }
- set {
- name = "autoDiscovery.enabled"
- value = "true"
- }
- set {
- name = "rbac.create"
- value = "true"
- }
-}
-
-module "iam_assumable_role_admin" {
- source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "~> 4.0"
-
- create_role = true
- role_name = "cluster-autoscaler"
- provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
- role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
- oidc_fully_qualified_subjects = ["system:serviceaccount:${local.k8s_service_account_namespace}:${local.k8s_service_account_name}"]
-}
-
-resource "aws_iam_policy" "cluster_autoscaler" {
- name_prefix = "cluster-autoscaler"
- description = "EKS cluster-autoscaler policy for cluster ${module.eks.cluster_id}"
- policy = data.aws_iam_policy_document.cluster_autoscaler.json
-}
-
-data "aws_iam_policy_document" "cluster_autoscaler" {
- statement {
- sid = "clusterAutoscalerAll"
- effect = "Allow"
-
- actions = [
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeLaunchConfigurations",
- "autoscaling:DescribeTags",
- "ec2:DescribeLaunchTemplateVersions",
- ]
-
- resources = ["*"]
- }
-
- statement {
- sid = "clusterAutoscalerOwn"
- effect = "Allow"
-
- actions = [
- "autoscaling:SetDesiredCapacity",
- "autoscaling:TerminateInstanceInAutoScalingGroup",
- "autoscaling:UpdateAutoScalingGroup",
- ]
-
- resources = ["*"]
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
- values = ["owned"]
- }
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
- values = ["true"]
- }
- }
-}
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
deleted file mode 100644
index 3e04974c8e..0000000000
--- a/examples/irsa/main.tf
+++ /dev/null
@@ -1,93 +0,0 @@
-provider "aws" {
- region = local.region
-}
-
-locals {
- name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_version = "1.21"
- region = "eu-west-1"
-
- tags = {
- Example = local.name
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-}
-
-################################################################################
-# EKS Module
-################################################################################
-
-module "eks" {
- source = "../.."
-
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
-
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = true
-
- enable_irsa = true
-
- self_managed_node_groups = {
- one = {
- name = "worker-group-1"
- instance_type = "t3.medium"
- desired_size = 1
- max_size = 4
- propagate_tags = [
- {
- "key" = "k8s.io/cluster-autoscaler/enabled"
- "propagate_at_launch" = "false"
- "value" = "true"
- },
- {
- "key" = "k8s.io/cluster-autoscaler/${local.name}"
- "propagate_at_launch" = "false"
- "value" = "owned"
- }
- ]
- }
- }
-
- tags = local.tags
-}
-
-################################################################################
-# Supporting Resources
-################################################################################
-
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "~> 3.0"
-
- name = local.name
- cidr = "10.0.0.0/16"
-
- azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-
- enable_nat_gateway = true
- single_nat_gateway = true
- enable_dns_hostnames = true
-
- enable_flow_log = true
- create_flow_log_cloudwatch_iam_role = true
- create_flow_log_cloudwatch_log_group = true
-
- public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
- }
-
- private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
- }
-
- tags = local.tags
-}
diff --git a/examples/irsa/outputs.tf b/examples/irsa/outputs.tf
deleted file mode 100644
index 3e9620157b..0000000000
--- a/examples/irsa/outputs.tf
+++ /dev/null
@@ -1,167 +0,0 @@
-################################################################################
-# Cluster
-################################################################################
-
-output "cluster_arn" {
- description = "The Amazon Resource Name (ARN) of the cluster"
- value = module.eks.cluster_arn
-}
-
-output "cluster_certificate_authority_data" {
- description = "Base64 encoded certificate data required to communicate with the cluster"
- value = module.eks.cluster_certificate_authority_data
-}
-
-output "cluster_endpoint" {
- description = "Endpoint for your Kubernetes API server"
- value = module.eks.cluster_endpoint
-}
-
-output "cluster_id" {
- description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
- value = module.eks.cluster_id
-}
-
-output "cluster_oidc_issuer_url" {
- description = "The URL on the EKS cluster for the OpenID Connect identity provider"
- value = module.eks.cluster_oidc_issuer_url
-}
-
-output "cluster_platform_version" {
- description = "Platform version for the cluster"
- value = module.eks.cluster_platform_version
-}
-
-output "cluster_status" {
- description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
- value = module.eks.cluster_status
-}
-
-output "cluster_primary_security_group_id" {
- description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
- value = module.eks.cluster_primary_security_group_id
-}
-
-################################################################################
-# Security Group
-################################################################################
-
-output "cluster_security_group_arn" {
- description = "Amazon Resource Name (ARN) of the cluster security group"
- value = module.eks.cluster_security_group_arn
-}
-
-output "cluster_security_group_id" {
- description = "ID of the cluster security group"
- value = module.eks.cluster_security_group_id
-}
-
-################################################################################
-# Node Security Group
-################################################################################
-
-output "node_security_group_arn" {
- description = "Amazon Resource Name (ARN) of the node shared security group"
- value = module.eks.node_security_group_arn
-}
-
-output "node_security_group_id" {
- description = "ID of the node shared security group"
- value = module.eks.node_security_group_id
-}
-
-################################################################################
-# IRSA
-################################################################################
-
-output "oidc_provider_arn" {
- description = "The ARN of the OIDC Provider if `enable_irsa = true`"
- value = module.eks.oidc_provider_arn
-}
-
-################################################################################
-# IAM Role
-################################################################################
-
-output "cluster_iam_role_name" {
- description = "IAM role name of the EKS cluster"
- value = module.eks.cluster_iam_role_name
-}
-
-output "cluster_iam_role_arn" {
- description = "IAM role ARN of the EKS cluster"
- value = module.eks.cluster_iam_role_arn
-}
-
-output "cluster_iam_role_unique_id" {
- description = "Stable and unique string identifying the IAM role"
- value = module.eks.cluster_iam_role_unique_id
-}
-
-################################################################################
-# EKS Addons
-################################################################################
-
-output "cluster_addons" {
- description = "Map of attribute maps for all EKS cluster addons enabled"
- value = module.eks.cluster_addons
-}
-
-################################################################################
-# EKS Identity Provider
-################################################################################
-
-output "cluster_identity_providers" {
- description = "Map of attribute maps for all EKS identity providers enabled"
- value = module.eks.cluster_identity_providers
-}
-
-################################################################################
-# CloudWatch Log Group
-################################################################################
-
-output "cloudwatch_log_group_name" {
- description = "Name of cloudwatch log group created"
- value = module.eks.cloudwatch_log_group_name
-}
-
-output "cloudwatch_log_group_arn" {
- description = "Arn of cloudwatch log group created"
- value = module.eks.cloudwatch_log_group_arn
-}
-
-################################################################################
-# Fargate Profile
-################################################################################
-
-output "fargate_profiles" {
- description = "Map of attribute maps for all EKS Fargate Profiles created"
- value = module.eks.fargate_profiles
-}
-
-################################################################################
-# EKS Managed Node Group
-################################################################################
-
-output "eks_managed_node_groups" {
- description = "Map of attribute maps for all EKS managed node groups created"
- value = module.eks.eks_managed_node_groups
-}
-
-################################################################################
-# Self Managed Node Group
-################################################################################
-
-output "self_managed_node_groups" {
- description = "Map of attribute maps for all self managed node groups created"
- value = module.eks.self_managed_node_groups
-}
-
-################################################################################
-# Additional
-################################################################################
-
-output "aws_auth_configmap_yaml" {
- description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
- value = module.eks.aws_auth_configmap_yaml
-}
diff --git a/examples/irsa/variables.tf b/examples/irsa/variables.tf
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/examples/instance_refresh/README.md b/examples/irsa_autoscale_refresh/README.md
similarity index 73%
rename from examples/instance_refresh/README.md
rename to examples/irsa_autoscale_refresh/README.md
index b3ed0ade4b..c38b43436a 100644
--- a/examples/instance_refresh/README.md
+++ b/examples/irsa_autoscale_refresh/README.md
@@ -1,8 +1,9 @@
-# Instance refresh example
+# IRSA, Cluster Autoscaler, and Instance Refresh example
-This is EKS example using [instance refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for worker groups.
-
-See [the official documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) for more details.
+This EKS example uses:
+- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)
+- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md)
+- [Instance Refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for self managed node groups
## Usage
@@ -25,6 +26,7 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws](#requirement\_aws) | >= 3.64 |
| [helm](#requirement\_helm) | >= 2.0 |
+| [null](#requirement\_null) | >= 3.0 |
## Providers
@@ -40,16 +45,29 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws](#provider\_aws) | >= 3.64 |
| [helm](#provider\_helm) | >= 2.0 |
+| [null](#provider\_null) | >= 3.0 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
| [eks](#module\_eks) | ../.. | n/a |
+| [iam\_assumable\_role\_cluster\_autoscaler](#module\_iam\_assumable\_role\_cluster\_autoscaler) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
@@ -70,11 +95,16 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.aws_node_termination_handler_sqs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
diff --git a/examples/irsa_autoscale_refresh/charts.tf b/examples/irsa_autoscale_refresh/charts.tf
new file mode 100644
index 0000000000..ed54db1442
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/charts.tf
@@ -0,0 +1,292 @@
+provider "helm" {
+ kubernetes {
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+ token = data.aws_eks_cluster_auth.cluster.token
+ }
+}
+
+################################################################################
+# Cluster Autoscaler
+# Based on the official docs at
+# https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler
+################################################################################
+
+resource "helm_release" "cluster_autoscaler" {
+ name = "cluster-autoscaler"
+ namespace = "kube-system"
+ repository = "https://kubernetes.github.io/autoscaler"
+ chart = "cluster-autoscaler"
+ version = "9.10.8"
+ create_namespace = false
+
+ set {
+ name = "awsRegion"
+ value = local.region
+ }
+
+ set {
+ name = "rbac.serviceAccount.name"
+ value = "cluster-autoscaler-aws"
+ }
+
+ set {
+ name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
+ value = module.iam_assumable_role_cluster_autoscaler.iam_role_arn
+ type = "string"
+ }
+
+ set {
+ name = "autoDiscovery.clusterName"
+ value = local.name
+ }
+
+ set {
+ name = "autoDiscovery.enabled"
+ value = "true"
+ }
+
+ set {
+ name = "rbac.create"
+ value = "true"
+ }
+
+ depends_on = [
+ module.eks
+ ]
+}
+
+module "iam_assumable_role_cluster_autoscaler" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
+ version = "~> 4.0"
+
+ create_role = true
+ role_name_prefix = "cluster-autoscaler"
+ role_description = "IRSA role for cluster autoscaler"
+
+ provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
+ role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
+ oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:cluster-autoscaler-aws"]
+ oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
+
+ tags = local.tags
+}
+
+resource "aws_iam_policy" "cluster_autoscaler" {
+ name = "KarpenterControllerPolicy-refresh"
+ policy = data.aws_iam_policy_document.cluster_autoscaler.json
+
+ tags = local.tags
+}
+
+data "aws_iam_policy_document" "cluster_autoscaler" {
+ statement {
+ sid = "clusterAutoscalerAll"
+ actions = [
+ "autoscaling:DescribeAutoScalingGroups",
+ "autoscaling:DescribeAutoScalingInstances",
+ "autoscaling:DescribeLaunchConfigurations",
+ "autoscaling:DescribeTags",
+ "ec2:DescribeLaunchTemplateVersions",
+ ]
+ resources = ["*"]
+ }
+
+ statement {
+ sid = "clusterAutoscalerOwn"
+ actions = [
+ "autoscaling:SetDesiredCapacity",
+ "autoscaling:TerminateInstanceInAutoScalingGroup",
+ "autoscaling:UpdateAutoScalingGroup",
+ ]
+ resources = ["*"]
+
+ condition {
+ test = "StringEquals"
+ variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
+ values = ["owned"]
+ }
+
+ condition {
+ test = "StringEquals"
+ variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
+ values = ["true"]
+ }
+ }
+}
+
+################################################################################
+# Node Termination Handler
+# Based on the official aws-node-termination-handler setup guide at
+# https://github.com/aws/aws-node-termination-handler#infrastructure-setup
+################################################################################
+
+resource "helm_release" "aws_node_termination_handler" {
+ name = "aws-node-termination-handler"
+ namespace = "kube-system"
+ repository = "https://aws.github.io/eks-charts"
+ chart = "aws-node-termination-handler"
+ version = "0.16.0"
+ create_namespace = false
+
+ set {
+ name = "awsRegion"
+ value = local.region
+ }
+
+ set {
+ name = "serviceAccount.name"
+ value = "aws-node-termination-handler"
+ }
+
+ set {
+ name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
+ value = module.aws_node_termination_handler_role.iam_role_arn
+ type = "string"
+ }
+
+ set {
+ name = "enableSqsTerminationDraining"
+ value = "true"
+ }
+
+ set {
+ name = "enableSpotInterruptionDraining"
+ value = "true"
+ }
+
+ set {
+ name = "queueURL"
+ value = module.aws_node_termination_handler_sqs.sqs_queue_id
+ }
+
+ set {
+ name = "logLevel"
+ value = "debug"
+ }
+
+ depends_on = [
+ module.eks
+ ]
+}
+
+module "aws_node_termination_handler_role" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
+ version = "~> 4.0"
+
+ create_role = true
+ role_name_prefix = "node-termination-handler"
+ role_description = "IRSA role for node termination handler"
+
+ provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
+ role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
+ oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
+ oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
+
+ tags = local.tags
+}
+
+resource "aws_iam_policy" "aws_node_termination_handler" {
+ name = "${local.name}-aws-node-termination-handler"
+ policy = data.aws_iam_policy_document.aws_node_termination_handler.json
+
+ tags = local.tags
+}
+
+data "aws_iam_policy_document" "aws_node_termination_handler" {
+ statement {
+ actions = [
+ "ec2:DescribeInstances",
+ "autoscaling:DescribeAutoScalingInstances",
+ "autoscaling:DescribeTags",
+ ]
+ resources = ["*"]
+ }
+
+ statement {
+ actions = ["autoscaling:CompleteLifecycleAction"]
+ resources = [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ }
+
+ statement {
+ actions = [
+ "sqs:DeleteMessage",
+ "sqs:ReceiveMessage"
+ ]
+ resources = [module.aws_node_termination_handler_sqs.sqs_queue_arn]
+ }
+}
+
+module "aws_node_termination_handler_sqs" {
+ source = "terraform-aws-modules/sqs/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ message_retention_seconds = 300
+ policy = data.aws_iam_policy_document.aws_node_termination_handler_sqs.json
+
+ tags = local.tags
+}
+
+data "aws_iam_policy_document" "aws_node_termination_handler_sqs" {
+ statement {
+ actions = ["sqs:SendMessage"]
+ resources = ["arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.name}"]
+
+ principals {
+ type = "Service"
+ identifiers = [
+ "events.amazonaws.com",
+ "sqs.amazonaws.com",
+ ]
+ }
+ }
+}
+
+resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
+ name = "${local.name}-asg-termination"
+ description = "Node termination event rule"
+
+ event_pattern = jsonencode({
+ "source" : ["aws.autoscaling"],
+ "detail-type" : ["EC2 Instance-terminate Lifecycle Action"]
+ "resources" : [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ })
+
+ tags = local.tags
+}
+
+resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
+ target_id = "${local.name}-asg-termination"
+ rule = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
+ arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
+}
+
+resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
+ name = "${local.name}-spot-termination"
+ description = "Node termination event rule"
+ event_pattern = jsonencode({
+ "source" : ["aws.ec2"],
+ "detail-type" : ["EC2 Spot Instance Interruption Warning"]
+ "resources" : [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ })
+}
+
+resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
+ target_id = "${local.name}-spot-termination"
+ rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
+ arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
+}
+
+# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
+# ensures that node termination does not require the lifecycle action to be completed,
+# and thus allows the ASG to be destroyed cleanly.
+resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
+ for_each = module.eks.self_managed_node_groups
+
+ name = "aws-node-termination-handler-${each.value.autoscaling_group_name}"
+ autoscaling_group_name = each.value.autoscaling_group_name
+ lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
+ heartbeat_timeout = 300
+ default_result = "CONTINUE"
+}
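As a design note, the repeated `set` blocks in the releases above could be collapsed into a single `values` entry; a sketch for the node termination handler, assuming the same chart value keys:

```hcl
# Equivalent sketch using one yamlencode'd values document instead of
# individual `set` blocks; key names follow the release defined above.
values = [yamlencode({
  awsRegion                      = local.region
  enableSqsTerminationDraining   = true
  enableSpotInterruptionDraining = true
  queueURL                       = module.aws_node_termination_handler_sqs.sqs_queue_id
  serviceAccount = {
    name = "aws-node-termination-handler"
    annotations = {
      "eks.amazonaws.com/role-arn" = module.aws_node_termination_handler_role.iam_role_arn
    }
  }
})]
```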
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
new file mode 100644
index 0000000000..623e8bbd14
--- /dev/null
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -0,0 +1,165 @@
+provider "aws" {
+ region = local.region
+}
+
+locals {
+ name = "ex-${replace(basename(path.cwd), "_", "-")}"
+ cluster_version = "1.21"
+ region = "eu-west-1"
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+data "aws_caller_identity" "current" {}
+
+data "aws_eks_cluster_auth" "cluster" {
+ name = module.eks.cluster_id
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+ source = "../.."
+
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ enable_irsa = true
+
+ # Self Managed Node Group(s)
+ self_managed_node_groups = {
+ refresh = {
+ max_size = 5
+ desired_size = 1
+
+ instance_types = ["m5.large", "m5n.large", "m5zn.large", "m6i.large", ]
+ create_launch_template = true
+ launch_template_name = "refresh"
+ update_default_version = true
+
+ instance_refresh = {
+ strategy = "Rolling"
+ preferences = {
+ checkpoint_delay = 600
+ checkpoint_percentages = [35, 70, 100]
+ instance_warmup = 300
+ min_healthy_percentage = 50
+ }
+ triggers = ["tag"]
+ }
+
+ propogate_tags = [{
+ key = "aws-node-termination-handler/managed"
+ value = ""
+ propagate_at_launch = true
+ }]
+ }
+ }
+
+ tags = merge(local.tags, { Foo = "bar" })
+}
+
+################################################################################
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+locals {
+ kubeconfig = yamlencode({
+ apiVersion = "v1"
+ kind = "Config"
+ current-context = "terraform"
+ clusters = [{
+ name = "${module.eks.cluster_id}"
+ cluster = {
+ certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
+ server = "${module.eks.cluster_endpoint}"
+ }
+ }]
+ contexts = [{
+ name = "terraform"
+ context = {
+ cluster = "${module.eks.cluster_id}"
+ user = "terraform"
+ }
+ }]
+ users = [{
+ name = "terraform"
+ user = {
+ token = "${data.aws_eks_cluster_auth.this.token}"
+ }
+ }]
+ })
+}
+
+resource "null_resource" "apply" {
+ triggers = {
+ kubeconfig = base64encode(local.kubeconfig)
+ cmd_patch = <<-EOT
+ kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+ EOT
+ }
+
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ environment = {
+ KUBECONFIG = self.triggers.kubeconfig
+ }
+ command = self.triggers.cmd_patch
+ }
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 3.0"
+
+ name = local.name
+ cidr = "10.0.0.0/16"
+
+ azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+ public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ enable_dns_hostnames = true
+
+ enable_flow_log = true
+ create_flow_log_cloudwatch_iam_role = true
+ create_flow_log_cloudwatch_log_group = true
+
+ public_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/cluster/${local.name}" = "shared"
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = merge(local.tags,
+ { "kubernetes.io/cluster/${local.name}" = "shared" }
+ )
+}
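The lifecycle hooks in `charts.tf` iterate over `module.eks.self_managed_node_groups`; a small illustrative output (the output name is hypothetical) surfaces the resulting ASG name for debugging:

```hcl
# Illustrative only: exposes the autoscaling group name of the "refresh"
# group, mirroring the attribute used by the lifecycle hooks in charts.tf.
output "refresh_asg_name" {
  description = "Autoscaling group name of the refresh self managed node group"
  value       = module.eks.self_managed_node_groups["refresh"].autoscaling_group_name
}
```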
diff --git a/examples/instance_refresh/outputs.tf b/examples/irsa_autoscale_refresh/outputs.tf
similarity index 100%
rename from examples/instance_refresh/outputs.tf
rename to examples/irsa_autoscale_refresh/outputs.tf
diff --git a/examples/instance_refresh/variables.tf b/examples/irsa_autoscale_refresh/variables.tf
similarity index 100%
rename from examples/instance_refresh/variables.tf
rename to examples/irsa_autoscale_refresh/variables.tf
diff --git a/examples/instance_refresh/versions.tf b/examples/irsa_autoscale_refresh/versions.tf
similarity index 88%
rename from examples/instance_refresh/versions.tf
rename to examples/irsa_autoscale_refresh/versions.tf
index 6d593dd4b9..5229f4454e 100644
--- a/examples/instance_refresh/versions.tf
+++ b/examples/irsa_autoscale_refresh/versions.tf
@@ -22,6 +22,10 @@ terraform {
version = ">= 2.1"
}
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+ }
helm = {
source = "hashicorp/helm"
version = ">= 2.0"
diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md
index 1ae48de39f..eb4d0cc15f 100644
--- a/examples/self_managed_node_group/README.md
+++ b/examples/self_managed_node_group/README.md
@@ -64,6 +64,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 6bd740448b..1881373fed 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -14,6 +14,8 @@ locals {
}
}
+data "aws_caller_identity" "current" {}
+
################################################################################
# EKS Module
################################################################################
@@ -21,12 +23,8 @@ locals {
module "eks" {
source = "../.."
- cluster_name = local.name
- cluster_version = local.cluster_version
-
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
-
+ cluster_name = local.name
+ cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
@@ -40,6 +38,18 @@ module "eks" {
}
}
+ cluster_encryption_config = [
+ {
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }
+ ]
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ enable_irsa = true
+
self_managed_node_group_defaults = {
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
@@ -309,6 +319,14 @@ resource "aws_security_group" "additional" {
tags = local.tags
}
+resource "aws_kms_key" "eks" {
+ description = "EKS Secret Encryption Key"
+ deletion_window_in_days = 7
+ enable_key_rotation = true
+
+ tags = local.tags
+}
+
data "aws_ami" "bottlerocket_ami" {
most_recent = true
owners = ["amazon"]
@@ -328,8 +346,6 @@ resource "aws_key_pair" "this" {
public_key = tls_private_key.this.public_key_openssh
}
-data "aws_caller_identity" "current" {}
-
resource "aws_kms_key" "ebs" {
description = "Customer managed key to encrypt self managed node group volumes"
deletion_window_in_days = 7
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index f7502ae095..6a029a9098 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -1,30 +1,28 @@
# Internal User Data Module
-Configuration in this directory renderes the appropriate user data for the given inputs. There are a number of different ways that user data can be utilized and this internal module is designed to aid in making that flexibility possible without over complicating and duplicating code.
+Configuration in this directory renders the appropriate user data for the given inputs. There are a number of different ways that user data can be utilized, and this internal module is designed to aid in making that flexibility possible as well as providing a means for out-of-band testing and validation.
## Combinations
-At a high level, we have two methods for launching nodes within the EKS module:
+At a high level, AWS EKS users have two methods for launching nodes within this EKS module (ignoring Fargate profiles):
1. EKS managed node group
2. Self managed node group
### EKS Managed Node Group
-Within the EKS managed node group, users have 2 methods of user data populating
+When using an EKS managed node group, users have 2 primary routes for interacting with the bootstrap user data:
-ℹ️ Note: When using bottlerocket as the desired platform, since the user data for bottlerocket is TOML, all configurations are merged in the one file supplied as user data. Therefore, `pre_bootstra_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the linux platform, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI boostrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc.
+1. If the EKS managed node group does **NOT** utilize a custom AMI, then users can elect to supply additional user data that is prepended before the EKS managed node group bootstrap user data. You can read more about this process from the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
-1. If the EKS managed node group does not use a custom AMI, the EKS managed node group is responsible for the AMI used, then users can elect to supply additional user data that is pre-pended before the EKS managed node group bootstrap user data. You can read more about this process from the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
-
- - Users can use the following two variables to supplement this process:
+ - Users can use the following variables to facilitate this process:
```hcl
pre_bootstrap_user_data = "..."
bootstrap_extra_args = "..."
```
-2. If the EKS managed node group utilizes a custom AMI, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary bootstrap configuration via user data to ensure that the node is configured to register with the cluster upon launch. There are two routes that users can utilize to facilitate this bootstrapping process:
+2. If the EKS managed node group does utilize a custom AMI, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary bootstrap configuration via user data to ensure that the node is configured to register with the cluster when launched. There are two routes that users can utilize to facilitate this bootstrapping process:
- If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post boostrap user data as well as bootstrap additional args that are supplied to the [AWS EKS boostrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
- Users can use the following variables to facilitate this process:
```hcl
@@ -33,7 +31,7 @@ Within the EKS managed node group, users have 2 methods of user data populating
bootstrap_extra_args = "..."
post_bootstrap_user_data = "..."
```
- - If the AMI is not an AWS EKS Optmized AMI derivative, or if users wish to have more control over the user data that is supplied to the node upon launch, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+ - If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
- Users can use the following variables to facilitate this process:
```hcl
user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
@@ -42,9 +40,12 @@ Within the EKS managed node group, users have 2 methods of user data populating
post_bootstrap_user_data = "..."
```
+| ℹ️ When using Bottlerocket as the desired platform, since the user data for Bottlerocket is TOML, all configuration settings are merged into the one file supplied as user data. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid since the Bottlerocket OS controls when the various settings are applied. If you wish to supply additional configuration settings when using Bottlerocket, supply them via the `bootstrap_extra_args` variable. For the Linux platform, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [Bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. |
+| :--- |
+
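For reference, a minimal combined sketch of the options above (the group names, AMI ID, and script contents are illustrative assumptions, not values taken from the module's examples):

```hcl
eks_managed_node_groups = {
  # 1. Default AMI: user data is prepended before the EKS supplied bootstrap user data
  default_ami = {
    pre_bootstrap_user_data = <<-EOT
      echo "running custom pre-bootstrap setup"
    EOT
  }

  # 2. Custom EKS optimized AMI derivative: opt in to the module supplied template
  custom_ami = {
    ami_id                     = "ami-0123456789abcdef0" # hypothetical AMI ID
    create_launch_template     = true
    enable_bootstrap_user_data = true
    bootstrap_extra_args       = "--kubelet-extra-args '--max-pods=20'"
    post_bootstrap_user_data   = "echo 'bootstrap complete'"
  }

  # Bottlerocket: additional TOML settings are merged in via `bootstrap_extra_args`
  bottlerocket = {
    platform             = "bottlerocket"
    bootstrap_extra_args = <<-EOT
      [settings.kernel]
      lockdown = "integrity"
    EOT
  }
}
```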
### Self Managed Node Group
-Within the self managed node group, the options presented to users is very similar to the 2nd option listed above for EKS managed node groups. Because self managed node groups require users to provide the bootstrap user data, there is no concept of appending to user data that AWS provides, users can either elect to use the user data template provided for their platform/OS by the module or provide their own user data template for rendering by the module.
+When using a self managed node group, the options presented to users are very similar to the 2nd option listed above for EKS managed node groups. Since self managed node groups require users to provide the bootstrap user data, there is no concept of appending to user data that AWS provides; users can either elect to use the user data template provided for their platform/OS by the module or provide their own user data template for rendering by the module.
- If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as additional bootstrap args that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
- Users can use the following variables to facilitate this process:
@@ -68,9 +69,9 @@ Within the self managed node group, the options presented to users is very simil
The rough flow of logic that is encapsulated within the `_user_data` internal module can be represented by the following diagram to better highlight the various manners in which user data can be populated.
-
+
-
+
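As a hedged companion sketch for the self managed node group options above (group names, labels, and the template path are illustrative assumptions):

```hcl
self_managed_node_groups = {
  # Module supplied template for EKS optimized AMI derivatives
  module_template = {
    pre_bootstrap_user_data  = "echo 'running pre-bootstrap setup'"
    bootstrap_extra_args     = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
    post_bootstrap_user_data = "echo 'bootstrap complete'"
  }

  # User supplied template rendered instead of the module supplied one
  custom_template = {
    user_data_template_path = "./templates/custom_user_data.sh.tpl" # hypothetical path
  }
}
```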
## Usage
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
index f2c2854424..b5adda2294 100644
--- a/modules/_user_data/main.tf
+++ b/modules/_user_data/main.tf
@@ -17,7 +17,7 @@ locals {
)) : ""
platform = {
bottlerocket = {
- user_data = var.create && var.platform == "bottlerocket" && var.enable_bootstrap_user_data ? base64encode(templatefile(
+ user_data = var.create && var.platform == "bottlerocket" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.bootstrap_extra_args != "") ? base64encode(templatefile(
coalesce(var.user_data_template_path, "${path.module}/../../templates/bottlerocket_user_data.tpl"),
{
# https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 6e23482db4..f8e3173e53 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -127,7 +127,7 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
-| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `""` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
@@ -156,7 +156,7 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
-| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
+| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no |
## Outputs
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 7bc5913eb1..39b0ea52e8 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -75,7 +75,7 @@ variable "create_launch_template" {
variable "launch_template_name" {
description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
type = string
- default = null
+ default = ""
}
variable "launch_template_use_name_prefix" {
@@ -111,7 +111,7 @@ variable "key_name" {
variable "vpc_security_group_ids" {
description = "A list of security group IDs to associate"
type = list(string)
- default = null
+ default = []
}
variable "default_version" {
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index 423780e1b7..61193a6c03 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -55,7 +55,6 @@ No modules.
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no |
| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Controls if the IAM Role that provides permissions for the EKS Fargate Profile will be created | `bool` | `true` | no |
-| [fargate\_profile\_name](#input\_fargate\_profile\_name) | Name of the EKS Fargate Profile | `string` | `""` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of an existing IAM role that provides permissions for the Fargate pod executions | `string` | `null` | no |
| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `""` | no |
@@ -63,6 +62,7 @@ No modules.
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [name](#input\_name) | Name of the EKS Fargate Profile | `string` | `""` | no |
| [selectors](#input\_selectors) | Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile | `any` | `[]` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate Profile | `list(string)` | `[]` | no |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf
index a1abba341b..dbffbda3be 100644
--- a/modules/fargate-profile/main.tf
+++ b/modules/fargate-profile/main.tf
@@ -1,7 +1,7 @@
data "aws_partition" "current" {}
locals {
- iam_role_name = coalesce(var.iam_role_name, var.fargate_profile_name, "fargate-profile")
+ iam_role_name = coalesce(var.iam_role_name, var.name, "fargate-profile")
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
@@ -54,7 +54,7 @@ resource "aws_eks_fargate_profile" "this" {
count = var.create ? 1 : 0
cluster_name = var.cluster_name
- fargate_profile_name = var.fargate_profile_name
+ fargate_profile_name = var.name
pod_execution_role_arn = var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn
subnet_ids = var.subnet_ids
diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf
index de185adcc4..b012ff2176 100644
--- a/modules/fargate-profile/variables.tf
+++ b/modules/fargate-profile/variables.tf
@@ -72,7 +72,7 @@ variable "cluster_name" {
default = null
}
-variable "fargate_profile_name" {
+variable "name" {
description = "Name of the EKS Fargate Profile"
type = string
default = ""
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 154b915a76..8f72b82cbc 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -75,7 +75,7 @@ $ terraform apply
| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
-| [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `null` | no |
+| [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no |
| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
@@ -108,11 +108,11 @@ $ terraform apply
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
| [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds | `number` | `null` | no |
-| [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `null` | no |
+| [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `3` | no |
| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | | no |
| [metrics\_granularity](#input\_metrics\_granularity) | The granularity to associate with the metrics to collect. The only valid value is `1Minute` | `string` | `null` | no |
| [min\_elb\_capacity](#input\_min\_elb\_capacity) | Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. Updates will not wait on ELB instance number changes | `number` | `null` | no |
-| [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `null` | no |
+| [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `0` | no |
| [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Configuration block containing settings to define launch targets for Auto Scaling groups | `any` | `null` | no |
| [name](#input\_name) | Name used across the resources created | `string` | `""` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
@@ -141,7 +141,7 @@ $ terraform apply
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
-| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `null` | no |
+| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no |
| [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior. | `string` | `null` | no |
| [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior. | `number` | `null` | no |
| [warm\_pool](#input\_warm\_pool) | If this block is configured, add a Warm Pool to the specified Auto Scaling group | `any` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 1b21a9d83d..7f3c9a74f0 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -381,6 +381,9 @@ resource "aws_autoscaling_group" "this" {
lifecycle {
create_before_destroy = true
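+    # Ignore desired_capacity so that external changes (e.g. cluster autoscaler scaling events) are not reverted by Terraform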
+ ignore_changes = [
+ desired_capacity
+ ]
}
tags = concat(
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 564f13b517..df4e237490 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -20,6 +20,12 @@ variable "platform" {
# User Data
################################################################################
+variable "cluster_name" {
+ description = "Name of the EKS cluster that the node group will be associated with"
+ type = string
+ default = null
+}
+
variable "cluster_endpoint" {
description = "Endpoint of associated EKS cluster"
type = string
@@ -99,19 +105,19 @@ variable "subnet_ids" {
variable "min_size" {
description = "The minimum size of the autoscaling group"
type = number
- default = null
+ default = 0
}
variable "max_size" {
description = "The maximum size of the autoscaling group"
type = number
- default = null
+ default = 3
}
variable "desired_size" {
description = "The number of Amazon EC2 instances that should be running in the autoscaling group"
type = number
- default = null
+ default = 1
}
variable "capacity_rebalance" {
@@ -252,12 +258,6 @@ variable "delete_timeout" {
default = null
}
-variable "cluster_name" {
- description = "Name of the EKS cluster that the node group will be associated with"
- type = string
- default = null
-}
-
variable "propagate_tags" {
description = "A list of tag blocks. Each element should have keys named key, value, and propagate_at_launch"
type = list(map(string))
@@ -427,7 +427,7 @@ variable "key_name" {
variable "vpc_security_group_ids" {
description = "A list of security group IDs to associate"
type = list(string)
- default = null
+ default = []
}
variable "enable_monitoring" {
diff --git a/node_groups.tf b/node_groups.tf
index fe50f2cfb2..b33a52b265 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -149,11 +149,11 @@ module "fargate_profile" {
for_each = { for k, v in var.fargate_profiles : k => v if var.create }
# Fargate Profile
- cluster_name = aws_eks_cluster.this[0].name
- fargate_profile_name = try(each.value.fargate_profile_name, each.key)
- subnet_ids = try(each.value.subnet_ids, var.fargate_profile_defaults.subnet_ids, var.subnet_ids)
- selectors = try(each.value.selectors, var.fargate_profile_defaults.selectors, [])
- timeouts = try(each.value.timeouts, var.fargate_profile_defaults.timeouts, {})
+ cluster_name = aws_eks_cluster.this[0].name
+ name = try(each.value.name, each.key)
+ subnet_ids = try(each.value.subnet_ids, var.fargate_profile_defaults.subnet_ids, var.subnet_ids)
+ selectors = try(each.value.selectors, var.fargate_profile_defaults.selectors, [])
+ timeouts = try(each.value.timeouts, var.fargate_profile_defaults.timeouts, {})
# IAM role
create_iam_role = try(each.value.create_iam_role, var.fargate_profile_defaults.create_iam_role, true)
@@ -289,7 +289,7 @@ module "self_managed_node_group" {
availability_zones = try(each.value.availability_zones, var.self_managed_node_group_defaults.availability_zones, null)
subnet_ids = try(each.value.subnet_ids, var.self_managed_node_group_defaults.subnet_ids, var.subnet_ids)
- min_size = try(each.value.min_size, var.self_managed_node_group_defaults.min_size, 1)
+ min_size = try(each.value.min_size, var.self_managed_node_group_defaults.min_size, 0)
max_size = try(each.value.max_size, var.self_managed_node_group_defaults.max_size, 3)
desired_size = try(each.value.desired_size, var.self_managed_node_group_defaults.desired_size, 1)
capacity_rebalance = try(each.value.capacity_rebalance, var.self_managed_node_group_defaults.capacity_rebalance, null)
From 561e5764a79d2c16be990f47d45b2dce6e992e4b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 6 Dec 2021 14:16:45 -0500
Subject: [PATCH 48/83] chore: updates from testing
---
examples/complete/main.tf | 45 ++++++++++++++++++++++-----------------
main.tf | 6 ++++++
2 files changed, 31 insertions(+), 20 deletions(-)
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 4241e38d01..e9d6c57943 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -197,9 +197,14 @@ module "self_managed_node_group" {
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
+ vpc_security_group_ids = [
+ module.eks.cluster_primary_security_group_id,
+ module.eks.cluster_security_group_id,
+ ]
create_launch_template = true
launch_template_name = "separate-self-mng"
+ update_default_version = true
instance_type = "m5.large"
tags = merge(local.tags, { Separate = "self-managed-node-group" })
@@ -285,31 +290,31 @@ locals {
})
# we have to combine the configmap created by the eks module with the externally created node group/profile sub-modules
- # aws_auth_configmap = <<-EOT
- # ${chomp(module.eks.aws_auth_configmap_yaml)}
- # - rolearn: ${module.eks_managed_node_group.iam_role_arn}
- # username: system:node:{{EC2PrivateDNSName}}
- # groups:
- # - system:bootstrappers
- # - system:nodes
- # - rolearn: ${module.self_managed_node_group.iam_role_arn}
- # username: system:node:{{EC2PrivateDNSName}}
- # groups:
- # - system:bootstrappers
- # - system:nodes
- # - rolearn: ${module.fargate_profile.fargate_profile_arn}
- # username: system:node:{{SessionName}}
- # groups:
- # - system:bootstrappers
- # - system:nodes
- # - system:node-proxier
- # EOT
+ aws_auth_configmap_yaml = <<-EOT
+ ${chomp(module.eks.aws_auth_configmap_yaml)}
+ - rolearn: ${module.eks_managed_node_group.iam_role_arn}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - rolearn: ${module.self_managed_node_group.iam_role_arn}
+ username: system:node:{{EC2PrivateDNSName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - rolearn: ${module.fargate_profile.fargate_profile_arn}
+ username: system:node:{{SessionName}}
+ groups:
+ - system:bootstrappers
+ - system:nodes
+ - system:node-proxier
+ EOT
}
resource "null_resource" "patch" {
triggers = {
kubeconfig = base64encode(local.kubeconfig)
- cmd_patch = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+ cmd_patch = "kubectl patch configmap/aws-auth --patch \"${local.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
}
provisioner "local-exec" {
diff --git a/main.tf b/main.tf
index 6b640218d2..fa4eead6bb 100644
--- a/main.tf
+++ b/main.tf
@@ -243,6 +243,12 @@ resource "aws_eks_addon" "this" {
resolve_conflicts = lookup(each.value, "resolve_conflicts", null)
service_account_role_arn = lookup(each.value, "service_account_role_arn", null)
+ lifecycle {
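+    # modified_at is a timestamp maintained by the EKS service; ignoring it avoids perpetual plan diffs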
+ ignore_changes = [
+ modified_at
+ ]
+ }
+
tags = var.tags
}
From 7ce79d7a506c731bc92c2aea6453c176398b472c Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 6 Dec 2021 17:33:38 -0500
Subject: [PATCH 49/83] chore: updating documentation
---
README.md | 453 ++++++++-------------
examples/complete/README.md | 20 +-
examples/complete/main.tf | 14 +-
examples/eks_managed_node_group/README.md | 7 +-
examples/eks_managed_node_group/main.tf | 10 +-
examples/fargate_profile/README.md | 5 +-
examples/fargate_profile/main.tf | 10 +-
examples/irsa_autoscale_refresh/README.md | 7 +-
examples/irsa_autoscale_refresh/charts.tf | 2 +-
examples/self_managed_node_group/README.md | 11 +-
examples/self_managed_node_group/main.tf | 15 +-
examples/user_data/README.md | 2 +
modules/_user_data/README.md | 10 +-
modules/_user_data/variables.tf | 10 +-
variables.tf | 48 +--
15 files changed, 250 insertions(+), 374 deletions(-)
diff --git a/README.md b/README.md
index 0c3de66092..1de80cb573 100644
--- a/README.md
+++ b/README.md
@@ -4,13 +4,19 @@ Terraform module which creates AWS EKS (Kubernetes) resources
## Available Features
-- EKS cluster
+- AWS EKS Cluster
+- AWS EKS Cluster Addons
+- AWS EKS Identity Provider Configuration
- All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported:
- [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
- [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
- [Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
- Support for custom AMI, custom launch template, and custom user data
-- Create or manage security groups that allow communication and coordination
+- Support for Amazon Linux 2 EKS Optimized AMI and Bottlerocket nodes
+  - Windows based node support is limited to the default user data template provided by the module, due to the manual steps required to provision Windows based EKS nodes
+- Module security group creation, bring your own security groups, as well as adding additional security group rules to the module created security groups
+- Support for providing maps of node groups/Fargate profiles to the cluster module definition, or using the separate node group/Fargate profile sub-modules
+- Provisions for node group/Fargate profile "default" settings - useful when creating multiple node groups/Fargate profiles that share a common set of configurations, while still individually controlling select features
## Usage
@@ -18,23 +24,140 @@ Terraform module which creates AWS EKS (Kubernetes) resources
module "eks" {
source = "terraform-aws-modules/eks/aws"
- cluster_version = "1.22"
- cluster_name = "my-cluster"
-
- vpc_id = "vpc-1234556abcdef"
- subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+ cluster_name = "my-cluster"
+ cluster_version = "1.21"
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
- eks_managed_node_groups = {
- default = {}
+ cluster_addons = {
+ coredns = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ kube-proxy = {}
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ }
+ }
+
+ cluster_encryption_config = [{
+ provider_key_arn = "ac01234b-00d9-40f6-ac95-e42345f78b00"
+ resources = ["secrets"]
+ }]
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+ # Self Managed Node Group(s)
+ self_managed_node_group_defaults = {
+ instance_type = "m6i.large"
+ update_default_version = true
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
self_managed_node_groups = {
one = {
- instance_type = "m5.large"
- desired_capacity = 1
- max_size = 5
+ name = "spot-1"
+
+ public_ip = true
+ max_size = 5
+ desired_size = 2
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "1"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "2"
+ },
+ ]
+ }
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ cd /tmp
+ sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl enable amazon-ssm-agent
+ sudo systemctl start amazon-ssm-agent
+ EOT
+ }
+ }
+
+ # EKS Managed Node Group(s)
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ create_launch_template = true
+ }
+
+ eks_managed_node_groups = {
+ blue = {}
+ green = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+ taints = {
+ dedicated = {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ }
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ # Fargate Profile(s)
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
}
}
@@ -47,27 +170,22 @@ module "eks" {
## Notes
-- Kubernetes is constantly evolving, and each version brings new features, fixes, and changes. Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version, and [CHANGELOG.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) for all changes related to this EKS module. Applications and add-ons will most likely require updates or workloads could fail after an upgrade. Check [the documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html) for any necessary steps you may need to perform.
-
- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. See the [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh) example provided.
Frequently Asked Questions
Why are nodes not being registered?
-Often an issue caused by a networking or endpoint mis-configuration. At least one of the cluster public or private endpoints must be enabled to access the cluster to work. If you require a public endpoint, setting up both (public and private) and restricting the public endpoint via setting `cluster_endpoint_public_access_cidrs` is recommended. More about communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
-
-Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access endpoint, the nodes need outgoing internet access:
+Often an issue caused by a networking or endpoint misconfiguration. At least one of the cluster's public or private endpoints must be enabled for access to the cluster to work. If you require a public endpoint, setting up both (public and private) and restricting the public endpoint via setting `cluster_endpoint_public_access_cidrs` is recommended. More info regarding communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
-- Nodes in private subnets: via a NAT gateway or instance. It will need adding along with appropriate routing rules.
-- Nodes in public subnets: assign public IPs to nodes. Set `public_ip = true` in the `worker_groups` list on this module.
+Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
-Important: If you apply only the public endpoint and setup `cluster_endpoint_public_access_cidrs` to restrict access. Remember, EKS nodes also use the public endpoint, so you must allow access to the endpoint. If not, then your nodes will fail to work correctly.
+- Nodes in private subnets: via a NAT gateway or instance along with the appropriate routing rules
+- Nodes in public subnets: ensure that launching nodes with public IPs is enabled (either through the module here or your subnet setting defaults)
-Cluster private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node communication to the endpoint stay within the VPC. When the private endpoint is enabled ensure that VPC DNS resolution and hostnames are also enabled:
+Important: If you apply only the public endpoint and configure the `cluster_endpoint_public_access_cidrs` to restrict access, know that EKS nodes will also use the public endpoint and you must allow access to the endpoint. If not, then your nodes will fail to work correctly.
-- If managing the VPC with Terraform: set `enable_dns_hostnames = true` and `enable_dns_support = true` on the `aws_vpc` resource. The [`terraform-aws-module/vpc/aws`](https://github.com/terraform-aws-modules/terraform-aws-vpc/) community module also has these variables.
-- Otherwise refer to the [AWS VPC docs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-updating) and [AWS EKS Cluster Endpoint Access docs](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for more information.
+Cluster private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node communication to the endpoint stays within the VPC. Ensure that VPC DNS resolution and hostnames are also enabled for your VPC when the private endpoint is enabled.
Nodes need to be able to connect to other AWS services plus pull down container images from container registries (ECR). If for some reason you cannot enable public internet access for nodes you can add VPC endpoints to the relevant services: EC2 API, ECR API, ECR DKR and S3.
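If managing the VPC with Terraform, a minimal sketch of the settings discussed above (assuming the community `terraform-aws-modules/vpc/aws` module; the name, CIDRs, and AZs are placeholders):

```hcl
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name            = "example"
  cidr            = "10.0.0.0/16"
  azs             = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

  # Outbound internet access for nodes in private subnets
  enable_nat_gateway = true

  # Required for nodes to resolve the cluster private endpoint
  enable_dns_support   = true
  enable_dns_hostnames = true
}
```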
@@ -85,7 +203,7 @@ An alternative is to use the aws provider's [`ignore_tags` variable](https://www
The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block. The setting is ignored to allow the cluster autoscaler to work correctly so that `terraform apply` does not accidentally remove running workers. You can change the desired count via the CLI or console if you're not using the cluster autoscaler.
-If you are not using autoscaling and want to control the number of nodes via terraform, set the `min_capacity` and `max_capacity` for node groups. Before changing those values, you must satisfy AWS `desired_capacity` constraints (which must be between new min/max values).
+If you are not using autoscaling and want to control the number of nodes via terraform, set the `min_size` and `max_size` for node groups. Before changing those values, you must satisfy AWS `desired_size` constraints (which must be between new min/max values).
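For example, a sketch (group name illustrative) that pins a node group's size via Terraform:

```hcl
eks_managed_node_groups = {
  fixed_size = {
    min_size = 2
    max_size = 2
    # Must satisfy the new min/max constraints when those are changed;
    # otherwise this value is ignored after creation (see note above)
    desired_size = 2
  }
}
```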
Why are nodes not recreated when the `launch_template` is recreated?
@@ -113,46 +231,7 @@ You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks dep
To enable Windows support for your EKS cluster, you should apply some configs manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
-Windows nodes requires additional cluster role (`eks:kube-proxy-windows`).
-
-
Example configuration
-
-Amazon EKS clusters must contain one or more Linux worker nodes to run core system pods that only run on Linux, such as coredns and the VPC resource controller.
-
-1. Build AWS EKS cluster with the next workers configuration (default Linux):
-
-```hcl
- worker_groups = {
- one = {
- name = "worker-group-linux"
- instance_type = "m5.large"
- platform = "linux"
- asg_desired_capacity = 2
- },
- }
-```
-
-2. Apply commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use tab with name `Windows`)
-3. Add one more worker group for Windows with required field `platform = "windows"` and update your cluster. Worker group example:
-
-```hcl
- worker_groups = {
- linux = {
- name = "worker-group-linux"
- instance_type = "m5.large"
- platform = "linux"
- asg_desired_capacity = 2
- },
- windows = {
- name = "worker-group-windows"
- instance_type = "m5.large"
- platform = "windows"
- asg_desired_capacity = 1
- },
- }
-```
-
-4. With `kubectl get nodes` you can see cluster with mixed (Linux/Windows) nodes support.
+Windows based nodes also require an additional cluster role (`eks:kube-proxy-windows`).
Worker nodes with labels do not join a 1.16+ cluster
@@ -160,231 +239,21 @@ Kubelet restricts the allowed list of labels in the `kubernetes.io` namespace th
Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
-
I am using both EKS Managed node groups and Self Managed node groups and pods scheduled on an EKS Managed node group are unable to resolve DNS (even communication between pods)
-
-This happen because CoreDNS can be scheduled on Self Managed nodes and by default this module does not create security group rules to allow communication between pods scheduled on Self Managed node groups and EKS Managed node groups.
-
-You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create required rules.
-
-
Dedicated control plane subnets
-
-[AWS recommends](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) to create dedicated subnets for EKS created network interfaces (control plane) which this module supports. To enable this:
-1. Set the `subnet_ids` to the subnets for the control plane
-2. Within the `eks_managed_node_groups`, `self_managed_node_groups`, or `fargate_profiles`, set the `subnet_ids` for the nodes (different from the control plane).
-
-```hcl
-module "eks" {
- source = "terraform-aws-modules/eks/aws"
-
- cluster_version = "1.21"
- cluster_name = "my-cluster"
-
- vpc_id = "vpc-1234556abcdef"
- subnet_ids = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]
-
- self_managed_node_group_defaults = {
- subnet_ids = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
- }
-
- self_managed_node_groups = {
- one = {
- instance_type = "m4.large"
- asg_max_size = 5
- },
- two = {
- name = "worker-group-2"
- subnet_ids = ["subnet-qwer123"]
- instance_type = "t3.medium"
- asg_desired_capacity = 1
- public_ip = true
- ebs_optimized = true
- }
- }
-
- tags = {
- Environment = "dev"
- Terraform = "true"
- }
-}
-```
-
-
-Autoscaling
-
-To enable worker node autoscaling you will need to do a few things:
-
-- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group
-- Install the cluster-autoscaler
-- Give the cluster-autoscaler access via an IAM policy
-
-It's probably easiest to follow the example in [examples/irsa](examples/irsa), this will install the cluster-autoscaler using [Helm](https://helm.sh/) and use IRSA to attach a policy.
-
-If you don't want to use IRSA then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example terraform code for the policy:
-
-```hcl
-resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
- policy_arn = aws_iam_policy.worker_autoscaling.arn
- role = module.my_cluster.worker_iam_role_name
-}
-
-resource "aws_iam_policy" "worker_autoscaling" {
- name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
- description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
- policy = data.aws_iam_policy_document.worker_autoscaling.json
- path = var.iam_path
- tags = var.tags
-}
-
-data "aws_iam_policy_document" "worker_autoscaling" {
- statement {
- sid = "eksWorkerAutoscalingAll"
- effect = "Allow"
-
- actions = [
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeLaunchConfigurations",
- "autoscaling:DescribeTags",
- "ec2:DescribeLaunchTemplateVersions",
- ]
-
- resources = ["*"]
- }
-
- statement {
- sid = "eksWorkerAutoscalingOwn"
- effect = "Allow"
-
- actions = [
- "autoscaling:SetDesiredCapacity",
- "autoscaling:TerminateInstanceInAutoScalingGroup",
- "autoscaling:UpdateAutoScalingGroup",
- ]
-
- resources = ["*"]
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
- values = ["owned"]
- }
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
- values = ["true"]
- }
- }
-}
-```
-
-And example values for the [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):
-
-```yaml
-rbac:
- create: true
-
-cloudProvider: aws
-awsRegion: YOUR_AWS_REGION
-
-autoDiscovery:
- clusterName: YOUR_CLUSTER_NAME
- enabled: true
-
-image:
- repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
- tag: v1.16.5
-```
-
-To install the chart, simply run helm with the `--values` option:
-
-```bash
-helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
-```
-
-
-
-Spot Instances
-
-# TODO - move to an example
-
-You will need to install a daemonset to catch the 2 minute warning before termination. This will ensure the node is gracefully drained before termination. You can install the [k8s-spot-termination-handler](https://github.com/kube-aws/kube-spot-termination-notice-handler) for this. There's a [Helm chart](https://github.com/helm/charts/tree/master/stable/k8s-spot-termination-handler):
-
-In the following examples at least 1 worker group that uses on-demand instances is included. This worker group has an added node label that can be used in scheduling. This could be used to schedule any workload not suitable for spot instances but is important for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) as it might be end up unscheduled when spot instances are terminated. You can add this to the values of the [cluster-autoscaler helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart):
-
-```yaml
-nodeSelector:
- kubernetes.io/lifecycle: normal
-```
-
-Notes:
-
-- The `spot_price` is set to the on-demand price so that the spot instances will run as long as they are the cheaper.
-- It's best to have a broad range of instance types to ensure there's always some instances to run when prices fluctuate.
-- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
-- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.
-
-## Using Launch Templates
-
-```hcl
- self_managed_node_groups = {
- one = {
- name = "spot-1"
- instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
- spot_instance_pools = 4
- max_size = 5
- desired_capacity = 5
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
- public_ip = true
- },
- }
-```
-
-## Using Launch Templates With Both Spot and On Demand
-
-Example launch template to launch 2 on demand instances of type m5.large, and have the ability to scale up using spot instances and on demand instances. The `node.kubernetes.io/lifecycle` node label will be set to the value queried from the EC2 meta-data service: either "on-demand" or "spot".
-
-`on_demand_percentage_above_base_capacity` is set to 25 so 1 in 4 new nodes, when auto-scaling, will be on-demand instances. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).
-
-```hcl
- self_managed_node_groups = {
- one = {
- name = "mixed-demand-spot"
- override_instance_types = ["m5.large", "m5a.large", "m4.large"]
- root_encrypted = true
- root_volume_size = 50
-
- min_size = 2
- desired_capacity = 2
- on_demand_base_capacity = 3
- on_demand_percentage_above_base_capacity = 25
- asg_max_size = 20
- spot_instance_pools = 3
-
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`'"
- }
- }
-```
-
-Note: An issue with the cluster-autoscaler: https://github.com/kubernetes/autoscaler/issues/1133 - AWS have released their own termination handler now: https://github.com/aws/aws-node-termination-handler
-
## Examples
-- [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket): EKS cluster using [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html)
- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations
- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups
-- [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
-- [Instance Refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh): EKS Cluster using self-managed node group demonstrating how to enable/utilize instance refresh configuration along with node termination handler
-- [IRSA](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/irsa): EKS Cluster demonstrating how to enable IRSA
-- [Secrets Encryption](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/secrets_encryption): EKS Cluster demonstrating how to encrypt cluster secrets
+- [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- [IRSA, Node Autoscaler, Instance Refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/irsa_autoscale_refresh): EKS Cluster using self-managed node group demonstrating how to enable/utilize instance refresh configuration along with node termination handler
- [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups
+- [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data
## Contributing
-Report issues/questions/feature requests on in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section.
-Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
+Report issues/questions/feature requests via [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new)
+Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md)
## Requirements
@@ -458,30 +327,30 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
| [cluster\_additional\_security\_group\_rules](#input\_cluster\_additional\_security\_group\_rules) | List of additional security group rules to add to the cluster security group created | `map(any)` | `{}` | no |
| [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no |
-| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `["audit", "api", "authenticator"]` | no |
+| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `["audit", "api", "authenticator"]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | | `[]` | no |
| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no |
-| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true` | `bool` | `true` | no |
+| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `true` | no |
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `["0.0.0.0/0"]` | no |
-| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different from IRSA | `any` | `{}` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
+| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
| [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no |
-| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false` | `string` | `""` | no |
| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `string` | `true` | no |
-| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
-| [cluster\_version](#input\_cluster\_version) | Kubernetes minor version to use for the EKS cluster (for example 1.21) | `string` | `null` | no |
-| [create](#input\_create) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
+| [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.21`) | `string` | `null` | no |
+| [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no |
| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
-| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Whether to create a security group for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no |
+| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
+| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
-| [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
+| [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
@@ -499,8 +368,8 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
| [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no |
| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
-| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to place the EKS cluster and workers within | `list(string)` | `[]` | no |
-| [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only | `map(string)` | `{}` | no |
+| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs where the EKS cluster (ENIs) will be provisioned along with the nodes/node groups. Node groups can be deployed within a different set of subnet IDs from within the node group configuration | `list(string)` | `[]` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and its nodes will be provisioned | `string` | `null` | no |
## Outputs
diff --git a/examples/complete/README.md b/examples/complete/README.md
index daf25524a1..8ab968f24b 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -1,10 +1,20 @@
# Complete AWS EKS Cluster
-Configuration in this directory creates EKS cluster with different features shown all-in-one cluster (e.g. Managed Node Groups, Worker Groups, Fargate, Spot instances, AWS Auth enabled).
-
-This example can be used to do smoke test.
-
-See configurations in other `examples` directories for more specific cases.
+Configuration in this directory creates an AWS EKS cluster with a broad mix of various features and settings provided by this module:
+
+- AWS EKS cluster
+- Disabled EKS cluster
+- Self managed node group
+- Externally attached self managed node group
+- Disabled self managed node group
+- EKS managed node group
+- Externally attached EKS managed node group
+- Disabled EKS managed node group
+- Fargate profile
+- Externally attached Fargate profile
+- Disabled Fargate profile
+- Cluster addons: CoreDNS, Kube-Proxy, and VPC-CNI
+- IAM roles for service accounts
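
The "disabled" variants above exercise the `create = false` toggle. A minimal sketch of that pattern, with an assumed sub-module path:

```hcl
# Hedged sketch: the definition stays in code, but no resources are created
module "disabled_self_managed_node_group" {
  source = "../../modules/self-managed-node-group" # assumed relative path

  create = false
}
```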
## Usage
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index e9d6c57943..097e86f332 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -36,12 +36,10 @@ module "eks" {
}
}
- cluster_encryption_config = [
- {
- provider_key_arn = aws_kms_key.eks.arn
- resources = ["secrets"]
- }
- ]
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
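
The `cluster_encryption_config` blocks in these examples point at an `aws_kms_key.eks` resource defined elsewhere in the example; a minimal sketch of such a key (the description and rotation setting are illustrative assumptions):

```hcl
# Illustrative KMS key backing the `aws_kms_key.eks.arn` reference above
resource "aws_kms_key" "eks" {
  description         = "EKS secret encryption key" # assumed
  enable_key_rotation = true                        # assumed
}
```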
@@ -59,8 +57,7 @@ module "eks" {
one = {
name = "spot-1"
- public_ip = true
-
+ public_ip = true
max_size = 5
desired_size = 2
@@ -104,6 +101,7 @@ module "eks" {
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
vpc_security_group_ids = [aws_security_group.additional.id]
create_launch_template = true
}
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index a0171a87fd..7d6e863772 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -3,9 +3,14 @@
Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configuring/customizing:
- A default, "out of the box" EKS managed node group as supplied by AWS EKS
+- A default, "out of the box" Bottlerocket EKS managed node group as supplied by AWS EKS
+- A Bottlerocket EKS managed node group that supplies additional bootstrap settings
+- A Bottlerocket EKS managed node group that demonstrates many of the configurations/customizations offered by the `eks-managed-node-group` sub-module for the Bottlerocket OS
- An EKS managed node group created from a launch template created outside of the module
- An EKS managed node group that utilizes a custom AMI that is an EKS optimized AMI derivative
-- An EKS managed node group that demonstrates nearly all of the configurations/customizations offered by the `eks_managed_node_group` sub-module
+- An EKS managed node group that demonstrates nearly all of the configurations/customizations offered by the `eks-managed-node-group` sub-module
+
+See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
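
As a hedged sketch, a Bottlerocket entry with extra bootstrap settings might look like the following; the TOML payload is an illustrative assumption of what `bootstrap_extra_args` can carry for Bottlerocket:

```hcl
# Illustrative `eks_managed_node_groups` entry
bottlerocket_extra = {
  ami_type = "BOTTLEROCKET_x86_64"
  platform = "bottlerocket"

  create_launch_template = true
  launch_template_name   = "bottlerocket-custom"

  # Merged into the Bottlerocket user data that AWS supplies (assumed settings)
  bootstrap_extra_args = <<-EOT
  [settings.kernel]
  lockdown = "integrity"
  EOT
}
```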
## Usage
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 9654603078..c8f3a4fef0 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -38,12 +38,10 @@ module "eks" {
}
}
- cluster_encryption_config = [
- {
- provider_key_arn = aws_kms_key.eks.arn
- resources = ["secrets"]
- }
- ]
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md
index 388ec18833..3f6c00116b 100644
--- a/examples/fargate_profile/README.md
+++ b/examples/fargate_profile/README.md
@@ -1,9 +1,6 @@
# AWS EKS Cluster with Fargate profiles
-Configuration in this directory creates EKS cluster with Fargate profiles in two different ways:
-
-- Using a root module, where EKS Cluster and Fargate profiles should be created at once. This is the default behaviour for most users.
-- Using `modules/fargate` submodule where Fargate profiles should be attached to the existing EKS Cluster.
+Configuration in this directory creates an AWS EKS cluster utilizing Fargate profiles.
## Usage
diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf
index f2df652486..13e3e4b7b0 100644
--- a/examples/fargate_profile/main.tf
+++ b/examples/fargate_profile/main.tf
@@ -36,12 +36,10 @@ module "eks" {
}
}
- cluster_encryption_config = [
- {
- provider_key_arn = aws_kms_key.eks.arn
- resources = ["secrets"]
- }
- ]
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
diff --git a/examples/irsa_autoscale_refresh/README.md b/examples/irsa_autoscale_refresh/README.md
index c38b43436a..ba03602b0d 100644
--- a/examples/irsa_autoscale_refresh/README.md
+++ b/examples/irsa_autoscale_refresh/README.md
@@ -1,9 +1,10 @@
# IRSA, Cluster Autoscaler, and Instance Refresh example
-This is EKS example uses:
-- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)
-- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md)
+Configuration in this directory creates an AWS EKS cluster with:
+- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) enabled
+- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) provisioned via a Helm Chart manifest
- [Instance Refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for self managed node groups
+- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler) provisioned via a Helm Chart manifest
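
A condensed sketch of how these pieces wire together (attribute names are from this patch series; the values are illustrative):

```hcl
module "eks" {
  source = "../.." # assumed relative path

  enable_irsa = true # creates the OpenID Connect Provider that backs IRSA

  self_managed_node_groups = {
    refresh = {
      create_launch_template = true
      launch_template_name   = "refresh"

      # Roll instances automatically when the launch template changes
      instance_refresh = {
        strategy = "Rolling"
      }
    }
  }
}
```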
## Usage
diff --git a/examples/irsa_autoscale_refresh/charts.tf b/examples/irsa_autoscale_refresh/charts.tf
index ed54db1442..d997565cfd 100644
--- a/examples/irsa_autoscale_refresh/charts.tf
+++ b/examples/irsa_autoscale_refresh/charts.tf
@@ -118,7 +118,7 @@ data "aws_iam_policy_document" "cluster_autoscaler" {
################################################################################
# Node Termination Handler
# Based on the official docs at
-# https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html
+# https://github.com/aws/aws-node-termination-handler
################################################################################
resource "helm_release" "aws_node_termination_handler" {
diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md
index eb4d0cc15f..63e752fbdb 100644
--- a/examples/self_managed_node_group/README.md
+++ b/examples/self_managed_node_group/README.md
@@ -1,11 +1,12 @@
-# Managed groups example
+# Self Managed Node Groups Example
-This is EKS example using managed groups feature in two different ways:
+Configuration in this directory creates an AWS EKS cluster with various Self Managed Node Groups (AutoScaling Groups) demonstrating the various methods of configuring/customizing:
-- Using SPOT instances in node group
-- Using ON_DEMAND instance in node group
+- A default, "out of the box" self managed node group as supplied by the `self-managed-node-group` sub-module
+- A Bottlerocket self managed node group that demonstrates many of the configurations/customizations offered by the `self-managed-node-group` sub-module for the Bottlerocket OS
+- A self managed node group that demonstrates nearly all of the configurations/customizations offered by the `self-managed-node-group` sub-module
-See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
+See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
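
A hedged sketch of a `self_managed_node_groups` entry (the name and sizes are illustrative assumptions):

```hcl
self_managed_node_groups = {
  default = {
    name = "self-mng-default" # assumed

    min_size      = 0
    max_size      = 3
    desired_size  = 1
    instance_type = "m6i.large"
  }
}
```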
## Usage
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 1881373fed..33387e84af 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -38,12 +38,10 @@ module "eks" {
}
}
- cluster_encryption_config = [
- {
- provider_key_arn = aws_kms_key.eks.arn
- resources = ["secrets"]
- }
- ]
+ cluster_encryption_config = [{
+ provider_key_arn = aws_kms_key.eks.arn
+ resources = ["secrets"]
+ }]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
@@ -51,8 +49,7 @@ module "eks" {
enable_irsa = true
self_managed_node_group_defaults = {
- disk_size = 50
- instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ disk_size = 50
}
self_managed_node_groups = {
@@ -114,7 +111,7 @@ module "eks" {
capacity_type = "SPOT"
disk_size = 256
force_update_version = true
- instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large", "m3.large", "m4.large"]
+ instance_type = "m6i.large"
labels = {
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
diff --git a/examples/user_data/README.md b/examples/user_data/README.md
index 7d43a530c2..57ba591944 100644
--- a/examples/user_data/README.md
+++ b/examples/user_data/README.md
@@ -1,5 +1,7 @@
# Internal User Data Module
+Configuration in this directory renders various user data outputs used for testing and validating the internal `_user_data` sub-module.
+
## Usage
To run this example you need to execute:
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 6a029a9098..1dcba03964 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -112,16 +112,16 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`, these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster and default name (prefix) used throughout the resources created | `string` | `""` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
-| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group | `bool` | `true` | no |
+| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not | `bool` | `true` | no |
| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
-| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
-| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
+| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
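
Given the inputs above, a minimal invocation of the sub-module might look like this sketch (the source path and cluster values are placeholders, not taken from this patch):

```hcl
module "user_data" {
  source = "../../modules/_user_data" # assumed relative path

  create   = true
  platform = "linux"

  cluster_name        = "example"
  cluster_endpoint    = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
  cluster_auth_base64 = "LS0tLS1CRUdJTi..."                               # placeholder

  enable_bootstrap_user_data = true
  is_eks_managed_node_group  = false

  # Injected ahead of the EKS bootstrap script (not used for bottlerocket)
  pre_bootstrap_user_data = <<-EOT
  echo "before bootstrap"
  EOT
}
```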
## Outputs
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
index ff4c2c7526..66ae91cc4c 100644
--- a/modules/_user_data/variables.tf
+++ b/modules/_user_data/variables.tf
@@ -17,13 +17,13 @@ variable "enable_bootstrap_user_data" {
}
variable "is_eks_managed_node_group" {
- description = "Determines whether the user data is used on nodes in an EKS managed node group"
+ description = "Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not"
type = bool
default = true
}
variable "cluster_name" {
- description = "Name of the EKS cluster and default name (prefix) used throughout the resources created"
+ description = "Name of the EKS cluster"
type = string
default = ""
}
@@ -41,19 +41,19 @@ variable "cluster_auth_base64" {
}
variable "pre_bootstrap_user_data" {
- description = "User data that is injected into the user data script ahead of the EKS bootstrap script"
+ description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
default = ""
}
variable "post_bootstrap_user_data" {
- description = "User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative"
+ description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
default = ""
}
variable "bootstrap_extra_args" {
- description = "Additional arguments passed to the bootstrap script"
+ description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
type = string
default = ""
}
diff --git a/variables.tf b/variables.tf
index 13101c7af6..c572f6b28a 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,11 +1,11 @@
variable "tags" {
- description = "A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only"
+ description = "A map of tags to add to all resources"
type = map(string)
default = {}
}
variable "create" {
- description = "Controls if EKS resources should be created (it affects almost all resources)"
+ description = "Controls if EKS resources should be created (affects nearly all resources)"
type = bool
default = true
}
@@ -15,37 +15,25 @@ variable "create" {
################################################################################
variable "cluster_name" {
- description = "Name of the EKS cluster and default name (prefix) used throughout the resources created"
+ description = "Name of the EKS cluster"
type = string
default = ""
}
-variable "iam_role_arn" {
- description = "Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false`"
- type = string
- default = null
-}
-
variable "cluster_version" {
- description = "Kubernetes minor version to use for the EKS cluster (for example 1.21)"
+ description = "Kubernetes `.` version to use for the EKS cluster (i.e.: `1.21`)"
type = string
default = null
}
variable "cluster_enabled_log_types" {
- description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
+ description = "A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
type = list(string)
default = ["audit", "api", "authenticator"]
}
-variable "cluster_security_group_id" {
- description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers"
- type = string
- default = ""
-}
-
variable "subnet_ids" {
- description = "A list of subnet IDs to place the EKS cluster and workers within"
+ description = "A list of subnet IDs where the EKS cluster (ENIs) will be provisioned along with the nodes/node groups. Node groups can be deployed within a different set of subnet IDs from within the node group configuration"
type = list(string)
default = []
}
@@ -57,7 +45,7 @@ variable "cluster_endpoint_private_access" {
}
variable "cluster_endpoint_public_access" {
- description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`"
+ description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled"
type = bool
default = true
}
@@ -69,7 +57,7 @@ variable "cluster_endpoint_public_access_cidrs" {
}
variable "cluster_service_ipv4_cidr" {
- description = "service ipv4 cidr for the kubernetes cluster"
+ description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks"
type = string
default = null
}
@@ -122,11 +110,17 @@ variable "cloudwatch_log_group_kms_key_id" {
################################################################################
variable "create_cluster_security_group" {
- description = "Whether to create a security group for the cluster or use the existing `cluster_security_group_id`"
+ description = "Determines if a security group is created for the cluster or use the existing `cluster_security_group_id`"
type = bool
default = true
}
+variable "cluster_security_group_id" {
+ description = "Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false`"
+ type = string
+ default = ""
+}
+
variable "vpc_id" {
description = "ID of the VPC where the cluster and its nodes will be provisioned"
type = string
@@ -168,7 +162,7 @@ variable "cluster_security_group_tags" {
################################################################################
variable "create_node_security_group" {
- description = "Whether to create a security group for the node groups or use the existing `node_security_group_id`"
+ description = "Determines whether to create a security group for the node groups or use the existing `node_security_group_id`"
type = bool
default = true
}
@@ -214,7 +208,7 @@ variable "node_security_group_tags" {
################################################################################
variable "enable_irsa" {
- description = "Whether to create OpenID Connect Provider for EKS to enable IRSA"
+ description = "Determines whether to create an OpenID Connect Provider for EKS to enable IRSA"
type = bool
default = false
}
@@ -235,6 +229,12 @@ variable "create_iam_role" {
default = true
}
+variable "iam_role_arn" {
+ description = "Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false`"
+ type = string
+ default = null
+}
+
variable "iam_role_name" {
description = "Name to use on cluster role created"
type = string
@@ -280,7 +280,7 @@ variable "cluster_addons" {
################################################################################
variable "cluster_identity_providers" {
- description = "Map of cluster identity provider configurations to enable for the cluster. Note - this is different from IRSA"
+ description = "Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA"
type = any
default = {}
}
From 1d6296f7498f11334bc08f55c3ad497c593c0841 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 6 Dec 2021 18:59:34 -0500
Subject: [PATCH 50/83] chore: update variables
---
README.md | 26 +-
examples/complete/main.tf | 14 +-
examples/eks_managed_node_group/main.tf | 14 +-
examples/irsa_autoscale_refresh/main.tf | 8 +-
examples/self_managed_node_group/main.tf | 2 +-
main.tf | 5 +-
modules/eks-managed-node-group/README.md | 30 +-
modules/eks-managed-node-group/main.tf | 4 +-
modules/eks-managed-node-group/variables.tf | 56 +--
modules/fargate-profile/README.md | 7 +-
modules/fargate-profile/main.tf | 1 +
modules/fargate-profile/variables.tf | 12 +-
modules/self-managed-node-group/README.md | 30 +-
modules/self-managed-node-group/main.tf | 4 +-
modules/self-managed-node-group/variables.tf | 400 +++++++++----------
node_groups.tf | 37 +-
variables.tf | 34 +-
17 files changed, 354 insertions(+), 330 deletions(-)
diff --git a/README.md b/README.md
index 1de80cb573..72ebb66843 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,9 @@ Terraform module which creates AWS EKS (Kubernetes) resources
- [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
- [Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
- Support for custom AMI, custom launch template, and custom user data
-- Support for Amazon Linux 2 EKS Optmized AMI and Bottlerocket nodes
+- Support for Amazon Linux 2 EKS Optimized AMI and Bottlerocket nodes
- Windows based node support is limited to a default user data template that is provided due to the lack of Windows support and manual steps required to provision Windows based EKS nodes
-- Module security group creation, bring your own security groups, as well as adding additiona security group rules to the module created security groups
+- Support for module-created security group(s), bringing your own security groups, as well as adding additional security group rules to the module-created security group(s)
- Support for providing maps of node groups/Fargate profiles to the cluster module definition or use separate node group/Fargate profile sub-modules
- Provisions to provide node group/Fargate profile "default" settings - useful for when creating multiple node groups/Fargate profiles where you want to set a common set of configurations once, and then individually control only select features
@@ -49,9 +49,9 @@ module "eks" {
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
- instance_type = "m6i.large"
- update_default_version = true
- iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ instance_type = "m6i.large"
+ update_launch_template_default_version = true
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
self_managed_node_groups = {
@@ -231,13 +231,13 @@ You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks dep
To enable Windows support for your EKS cluster, you should apply some configs manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
-Windows based nodes aksi require additional cluster role (`eks:kube-proxy-windows`).
+Windows based nodes require an additional cluster role (`eks:kube-proxy-windows`).
Worker nodes with labels do not join a 1.16+ cluster
Kubelet restricts the allowed list of labels in the `kubernetes.io` namespace that can be applied to nodes starting in 1.16. Older configurations used labels such as `kubernetes.io/lifecycle=spot` which is no longer allowed; instead, use `node.kubernetes.io/lifecycle=spot`
-Reference the `--node-labels` argument for your version of Kubenetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
+Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
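
As a hedged illustration, an allowed label can be forwarded to kubelet through the bootstrap arguments of a node group; the flag wiring below is an assumption, not taken from this patch:

```hcl
# Illustrative: pass an allowed `node.kubernetes.io/*` label to kubelet
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
```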
@@ -346,19 +346,21 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no |
| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or to use the existing `cluster_security_group_id` | `bool` | `true` | no |
-| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a cluster IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
| [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no |
| [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
-| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on cluster role created | `string` | `null` | no |
+| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
+| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
| [iam\_role\_path](#input\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
-| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the cluster role | `string` | `null` | no |
-| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the cluster IAM role created | `map(string)` | `{}` | no |
-| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
+| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
| [node\_additional\_security\_group\_rules](#input\_node\_additional\_security\_group\_rules) | List of additional security group rules to add to the node security group created | `map(any)` | `{}` | no |
| [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no |
| [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 097e86f332..77448ed90a 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -48,9 +48,9 @@ module "eks" {
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
- update_default_version = true
- vpc_security_group_ids = [aws_security_group.additional.id]
- iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ launch_template_default_version = true
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
self_managed_node_groups = {
@@ -200,10 +200,10 @@ module "self_managed_node_group" {
module.eks.cluster_security_group_id,
]
- create_launch_template = true
- launch_template_name = "separate-self-mng"
- update_default_version = true
- instance_type = "m5.large"
+ create_launch_template = true
+ launch_template_name = "separate-self-mng"
+ launch_template_default_version = true
+ instance_type = "m5.large"
tags = merge(local.tags, { Separate = "self-managed-node-group" })
}
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index c8f3a4fef0..9056c930d3 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -69,9 +69,9 @@ module "eks" {
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
- update_default_version = true
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
+ launch_template_default_version = true
# this will get added to what AWS provides
bootstrap_extra_args = <<-EOT
@@ -87,9 +87,9 @@ module "eks" {
ami_id = "ami-0ff61e0bcfc81dc94"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
- update_default_version = true
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
+ launch_template_default_version = true
# use module user data template to bootstrap
enable_bootstrap_user_data = true
@@ -180,7 +180,7 @@ module "eks" {
launch_template_name = "eks-managed-ex"
launch_template_use_name_prefix = true
description = "EKS managed node group example launch template"
- update_default_version = true
+ launch_template_default_version = true
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
index 623e8bbd14..69b3a2cc3f 100644
--- a/examples/irsa_autoscale_refresh/main.tf
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -43,10 +43,10 @@ module "eks" {
max_size = 5
desired_size = 1
- instance_types = ["m5.large", "m5n.large", "m5zn.large", "m6i.large", ]
- create_launch_template = true
- launch_template_name = "refresh"
- update_default_version = true
+      instance_types                  = ["m5.large", "m5n.large", "m5zn.large", "m6i.large"]
+ create_launch_template = true
+ launch_template_name = "refresh"
+ launch_template_default_version = true
instance_refresh = {
strategy = "Rolling"
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 33387e84af..3e31feba2a 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -126,7 +126,7 @@ module "eks" {
launch_template_name = "self-managed-ex"
launch_template_use_name_prefix = true
description = "Self managed node group example launch template"
- update_default_version = true
+ launch_template_default_version = true
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
diff --git a/main.tf b/main.tf
index fa4eead6bb..2c224afced 100644
--- a/main.tf
+++ b/main.tf
@@ -193,6 +193,7 @@ resource "aws_iam_role" "this" {
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
path = var.iam_role_path
+ description = var.iam_role_description
assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
permissions_boundary = var.iam_role_permissions_boundary
@@ -220,10 +221,10 @@ data "aws_iam_policy_document" "additional" {
# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
- for_each = var.create && var.create_iam_role ? toset([
+ for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
"${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
"${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
- ]) : toset([])
+ ], var.iam_role_additional_policies)))) : toset([])
policy_arn = each.value
role = aws_iam_role.this[0].name
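
A usage sketch for the new `iam_role_additional_policies` input (the policy ARN is borrowed from the examples in this series; the module source is assumed):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # assumed source

  # Merged into the role policy attachments alongside the EKS defaults
  iam_role_additional_policies = [
    "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
  ]
}
```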
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index f8e3173e53..c789c5ad7b 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -88,34 +88,33 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version | `string` | `null` | no |
| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no |
| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no |
-| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`, these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
| [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no |
| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
-| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plain security group ID | `string` | `null` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `false` | no |
-| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. By default, EKS will use its own default launch template | `bool` | `false` | no |
+| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
-| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
| [desired\_size](#input\_desired\_size) | Desired number of worker nodes | `number` | `1` | no |
| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
| [disk\_size](#input\_disk\_size) | Disk size in GiB for worker nodes. Defaults to `20` | `number` | `null` | no |
-| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
+| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
-| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no |
| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
| [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
-| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group | `string` | `null` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
@@ -125,8 +124,9 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `null` | no |
| [instance\_types](#input\_instance\_types) | Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]` | `list(string)` | `null` | no |
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
-| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [key\_name](#input\_key\_name) | The key name that should be used for the instance(s) | `string` | `null` | no |
| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
+| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default version of the launch template | `string` | `null` | no |
| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `""` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
@@ -134,15 +134,15 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| [max\_size](#input\_max\_size) | Maximum number of worker nodes | `number` | `3` | no |
| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |  | no |
| [min\_size](#input\_min\_size) | Minimum number of worker nodes | `number` | `0` | no |
-| [name](#input\_name) | Name of the EKS Node Group | `string` | `""` | no |
+| [name](#input\_name) | Name of the EKS managed node group | `string` | `""` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
-| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
-| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
-| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
+| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket` or `linux` based; `windows` is not supported | `string` | `"linux"` | no |
+| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `map(string)` | `{}` | no |
-| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS managed node group security group"` | no |
+| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS managed node group security group"` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no |
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
@@ -152,7 +152,7 @@ If you use a custom AMI, you need to supply via user-data, the bootstrap script
| [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `any` | `{}` | no |
| [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no |
| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `{}` | no |
-| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `bool` | `true` | no |
+| [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch template's default version on each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no |
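
A short sketch contrasting the two renamed launch template inputs; since they conflict, only one should be set (values illustrative):

```hcl
create_launch_template = true
launch_template_name   = "eks-managed-ex"

# Either pin an explicit default version...
launch_template_default_version = "2" # assumed version number

# ...or let each change become the new default (conflicts with the above):
# update_launch_template_default_version = true
```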
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index a2a5aa7688..f32e8e8a97 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -46,8 +46,8 @@ resource "aws_launch_template" "this" {
vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
- default_version = var.default_version
- update_default_version = var.update_default_version
+ default_version = var.launch_template_default_version
+ update_default_version = var.update_launch_template_default_version
disable_api_termination = var.disable_api_termination
# Set on EKS managed node group, will fail if set here
# https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 39b0ea52e8..47ca3c2991 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -11,7 +11,7 @@ variable "tags" {
}
variable "platform" {
- description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based"
+ description = "Identifies if the OS platform is `bottlerocket` or `linux` based; `windows` is not supported"
type = string
default = "linux"
}
@@ -26,6 +26,12 @@ variable "enable_bootstrap_user_data" {
default = false
}
+variable "cluster_name" {
+ description = "Name of associated EKS cluster"
+ type = string
+ default = null
+}
+
variable "cluster_endpoint" {
description = "Endpoint of associated EKS cluster"
type = string
@@ -39,19 +45,19 @@ variable "cluster_auth_base64" {
}
variable "pre_bootstrap_user_data" {
- description = "User data that is injected into the user data script ahead of the EKS bootstrap script"
+ description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
default = ""
}
variable "post_bootstrap_user_data" {
- description = "User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative"
+ description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
default = ""
}
variable "bootstrap_extra_args" {
- description = "Additional arguments passed to the bootstrap script"
+ description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
type = string
default = ""
}
@@ -67,7 +73,7 @@ variable "user_data_template_path" {
################################################################################
variable "create_launch_template" {
- description = "Determines whether to create launch template or not"
+ description = "Determines whether to create a launch template or not. By default, EKS will use its own default launch template"
type = bool
default = false
}
@@ -91,7 +97,7 @@ variable "description" {
}
variable "ebs_optimized" {
- description = "If true, the launched EC2 instance will be EBS-optimized"
+ description = "If true, the launched EC2 instance(s) will be EBS-optimized"
type = bool
default = null
}
@@ -103,7 +109,7 @@ variable "ami_id" {
}
variable "key_name" {
- description = "The key name that should be used for the instance"
+ description = "The key name that should be used for the instance(s)"
type = string
default = null
}
@@ -114,14 +120,14 @@ variable "vpc_security_group_ids" {
default = []
}
-variable "default_version" {
- description = "Default Version of the launch template"
+variable "launch_template_default_version" {
+ description = "Default version of the launch template"
type = string
default = null
}
-variable "update_default_version" {
- description = "Whether to update Default Version each update. Conflicts with `default_version`"
+variable "update_launch_template_default_version" {
+ description = "Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version`"
type = bool
default = true
}
@@ -211,7 +217,7 @@ variable "metadata_options" {
variable "enable_monitoring" {
description = "Enables/disables detailed monitoring"
type = bool
- default = null
+ default = true
}
variable "network_interfaces" {
@@ -230,18 +236,6 @@ variable "placement" {
# EKS Managed Node Group
################################################################################
-variable "cluster_name" {
- description = "Name of associated EKS cluster"
- type = string
- default = null
-}
-
-variable "iam_role_arn" {
- description = "Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group"
- type = string
- default = null
-}
-
variable "subnet_ids" {
description = "Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME`"
type = list(string)
@@ -267,7 +261,7 @@ variable "desired_size" {
}
variable "name" {
- description = "Name of the EKS Node Group"
+ description = "Name of the EKS managed node group"
type = string
default = ""
}
@@ -361,7 +355,7 @@ variable "timeouts" {
################################################################################
variable "create_security_group" {
- description = "Whether to create a security group"
+ description = "Determines whether to create a security group"
type = bool
default = true
}
@@ -379,7 +373,7 @@ variable "security_group_use_name_prefix" {
}
variable "security_group_description" {
- description = "Description for the security group"
+ description = "Description for the security group created"
type = string
default = "EKS managed node group security group"
}
@@ -397,7 +391,7 @@ variable "security_group_rules" {
}
variable "cluster_security_group_id" {
- description = "Cluster control plain security group ID"
+ description = "Cluster control plane security group ID"
type = string
default = null
}
@@ -418,6 +412,12 @@ variable "create_iam_role" {
default = true
}
+variable "iam_role_arn" {
+ description = "Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false`"
+ type = string
+ default = null
+}
+
variable "iam_role_name" {
description = "Name to use on IAM role created"
type = string
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index 61193a6c03..02a06b7730 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -53,10 +53,11 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no |
-| [create](#input\_create) | Controls if Fargate resources should be created (it affects all resources) | `bool` | `true` | no |
-| [create\_iam\_role](#input\_create\_iam\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile will be created | `bool` | `true` | no |
+| [create](#input\_create) | Determines whether to create Fargate profile or not | `bool` | `true` | no |
+| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
-| [iam\_role\_arn](#input\_iam\_role\_arn) | Amazon Resource Name (ARN) of an existing IAM role that provides permissions for the Fargate pod executions | `string` | `null` | no |
+| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the Fargate profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `""` | no |
| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf
index dbffbda3be..b37fc45116 100644
--- a/modules/fargate-profile/main.tf
+++ b/modules/fargate-profile/main.tf
@@ -29,6 +29,7 @@ resource "aws_iam_role" "this" {
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
path = var.iam_role_path
+ description = var.iam_role_description
assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
permissions_boundary = var.iam_role_permissions_boundary
diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf
index b012ff2176..afad4809a6 100644
--- a/modules/fargate-profile/variables.tf
+++ b/modules/fargate-profile/variables.tf
@@ -1,5 +1,5 @@
variable "create" {
- description = "Controls if Fargate resources should be created (it affects all resources)"
+ description = "Determines whether to create Fargate profile or not"
type = bool
default = true
}
@@ -15,13 +15,13 @@ variable "tags" {
################################################################################
variable "create_iam_role" {
- description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile will be created"
+ description = "Determines whether an IAM role is created or to use an existing IAM role"
type = bool
default = true
}
variable "iam_role_arn" {
- description = "Amazon Resource Name (ARN) of an existing IAM role that provides permissions for the Fargate pod executions"
+ description = "Existing IAM role ARN for the Fargate profile. Required if `create_iam_role` is set to `false`"
type = string
default = null
}
@@ -44,6 +44,12 @@ variable "iam_role_path" {
default = null
}
+variable "iam_role_description" {
+ description = "Description of the role"
+ type = string
+ default = null
+}
+
variable "iam_role_permissions_boundary" {
description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
type = string
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 8f72b82cbc..7808290c5a 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -56,23 +56,22 @@ $ terraform apply
| [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance | `string` | `""` | no |
| [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids` | `list(string)` | `null` | no |
| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no |
-| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script | `string` | `""` | no |
+| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`, these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
| [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
| [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `null` | no |
| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster that the node group will be associated with | `string` | `null` | no |
-| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plain security group ID | `string` | `null` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
+| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
-| [create](#input\_create) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
+| [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no |
| [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
-| [create\_security\_group](#input\_create\_security\_group) | Whether to create a security group | `bool` | `true` | no |
+| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
-| [default\_version](#input\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
| [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no |
@@ -80,14 +79,14 @@ $ terraform apply
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
-| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `null` | no |
+| [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no |
| [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `null` | no |
| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `null` | no |
| [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no |
| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `null` | no |
-| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group | `string` | `null` | no |
+| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no |
| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
@@ -103,6 +102,7 @@ $ terraform apply
| [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no |
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
@@ -114,18 +114,18 @@ $ terraform apply
| [min\_elb\_capacity](#input\_min\_elb\_capacity) | Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. Updates will not wait on ELB instance number changes | `number` | `null` | no |
| [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `0` | no |
| [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Configuration block containing settings to define launch targets for Auto Scaling groups | `any` | `null` | no |
-| [name](#input\_name) | Name used across the resources created | `string` | `""` | no |
+| [name](#input\_name) | Name of the self managed node group | `string` | `""` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
| [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no |
| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
-| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative | `string` | `""` | no |
-| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script | `string` | `""` | no |
-| [propagate\_tags](#input\_propagate\_tags) | A list of tag blocks. Each element should have keys named key, value, and propagate\_at\_launch | `list(map(string))` | `[]` | no |
+| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [propagate\_tags](#input\_propagate\_tags) | A list of tag blocks. Each element should have keys named `key`, `value`, and `propagate_at_launch` | `list(map(string))` | `[]` | no |
| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
-| [security\_group\_description](#input\_security\_group\_description) | Description for the security group | `string` | `"EKS worker security group"` | no |
+| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS worker security group"` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no |
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
@@ -133,10 +133,10 @@ $ terraform apply
| [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones` | `list(string)` | `null` | no |
| [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `null` | no |
-| [tags](#input\_tags) | A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws\_autoscaling\_group requires. | `map(string)` | `{}` | no |
+| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
| [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing | `list(string)` | `[]` | no |
| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `null` | no |
-| [update\_default\_version](#input\_update\_default\_version) | Whether to update Default Version each update. Conflicts with `default_version` | `string` | `null` | no |
+| [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch template default version on each update. Conflicts with `launch_template_default_version` | `string` | `null` | no |
| [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 7f3c9a74f0..beda37df0c 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -57,8 +57,8 @@ resource "aws_launch_template" "this" {
vpc_security_group_ids = compact(concat([try(aws_security_group.this[0].id, "")], var.vpc_security_group_ids))
- default_version = var.default_version
- update_default_version = var.update_default_version
+ default_version = var.launch_template_default_version
+ update_default_version = var.update_launch_template_default_version
disable_api_termination = var.disable_api_termination
instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
kernel_id = var.kernel_id
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index df4e237490..85d816e65f 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -1,11 +1,11 @@
variable "create" {
- description = "Determines whether to create autoscaling group or not"
+ description = "Determines whether to create self managed node group or not"
type = bool
default = true
}
variable "tags" {
- description = "A map of tags and values in the same format as other resources accept. This will be converted into the non-standard format that the aws_autoscaling_group requires."
+ description = "A map of tags to add to all resources"
type = map(string)
default = {}
}
@@ -21,7 +21,7 @@ variable "platform" {
################################################################################
variable "cluster_name" {
- description = "Name of the EKS cluster that the node group will be associated with"
+ description = "Name of associated EKS cluster"
type = string
default = null
}
@@ -39,19 +39,19 @@ variable "cluster_auth_base64" {
}
variable "pre_bootstrap_user_data" {
- description = "User data that is injected into the user data script ahead of the EKS bootstrap script"
+ description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
default = ""
}
variable "post_bootstrap_user_data" {
- description = "User data that is appended to the user data script after of the EKS bootstrap script. Only valid when using a custom EKS optimized AMI derivative"
+ description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
default = ""
}
variable "bootstrap_extra_args" {
- description = "Additional arguments passed to the bootstrap script"
+ description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
type = string
default = ""
}
@@ -62,12 +62,200 @@ variable "user_data_template_path" {
default = ""
}
+################################################################################
+# Launch template
+################################################################################
+
+variable "create_launch_template" {
+ description = "Determines whether to create launch template or not"
+ type = bool
+ default = true
+}
+
+variable "launch_template_name" {
+ description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
+ type = string
+ default = null
+}
+
+variable "launch_template_use_name_prefix" {
+ description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
+ type = bool
+ default = true
+}
+
+variable "description" {
+ description = "Description of the launch template"
+ type = string
+ default = null
+}
+
+variable "launch_template_default_version" {
+ description = "Default Version of the launch template"
+ type = string
+ default = null
+}
+
+variable "update_launch_template_default_version" {
+ description = "Whether to update Default Version each update. Conflicts with `launch_template_default_version`"
+ type = string
+ default = null
+}
+
+variable "disable_api_termination" {
+ description = "If true, enables EC2 instance termination protection"
+ type = bool
+ default = null
+}
+
+variable "instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
+ type = string
+ default = null
+}
+
+variable "kernel_id" {
+ description = "The kernel ID"
+ type = string
+ default = null
+}
+
+variable "ram_disk_id" {
+ description = "The ID of the ram disk"
+ type = string
+ default = null
+}
+
+variable "block_device_mappings" {
+ description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
+ type = any
+ default = {}
+}
+
+variable "capacity_reservation_specification" {
+ description = "Targeting for EC2 capacity reservations"
+ type = any
+ default = null
+}
+
+variable "cpu_options" {
+ description = "The CPU options for the instance"
+ type = map(string)
+ default = null
+}
+
+variable "credit_specification" {
+ description = "Customize the credit specification of the instance"
+ type = map(string)
+ default = null
+}
+
+variable "elastic_gpu_specifications" {
+ description = "The elastic GPU to attach to the instance"
+ type = map(string)
+ default = null
+}
+
+variable "elastic_inference_accelerator" {
+ description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance"
+ type = map(string)
+ default = null
+}
+
+variable "enclave_options" {
+ description = "Enable Nitro Enclaves on launched instances"
+ type = map(string)
+ default = null
+}
+
+variable "hibernation_options" {
+ description = "The hibernation options for the instance"
+ type = map(string)
+ default = null
+}
+
+variable "instance_market_options" {
+ description = "The market (purchasing) option for the instance"
+ type = any
+ default = null
+}
+
+variable "license_specifications" {
+ description = "A list of license specifications to associate with"
+ type = map(string)
+ default = null
+}
+
+variable "network_interfaces" {
+ description = "Customize network interfaces to be attached at instance boot time"
+ type = list(any)
+ default = []
+}
+
+variable "placement" {
+ description = "The placement of the instance"
+ type = map(string)
+ default = null
+}
+
+variable "ebs_optimized" {
+ description = "If true, the launched EC2 instance will be EBS-optimized"
+ type = bool
+ default = null
+}
+
+variable "ami_id" {
+ description = "The AMI from which to launch the instance"
+ type = string
+ default = ""
+}
+
+variable "cluster_version" {
+ description = "Kubernetes cluster version - used to lookup default AMI ID if one is not provided"
+ type = string
+ default = null
+}
+
+variable "instance_type" {
+ description = "The type of the instance to launch"
+ type = string
+ default = ""
+}
+
+variable "key_name" {
+ description = "The key name that should be used for the instance"
+ type = string
+ default = null
+}
+
+variable "vpc_security_group_ids" {
+ description = "A list of security group IDs to associate"
+ type = list(string)
+ default = []
+}
+
+variable "enable_monitoring" {
+ description = "Enables/disables detailed monitoring"
+ type = bool
+ default = true
+}
+
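+# Note: the defaults below require IMDSv2 (`http_tokens = "required"`) and set a
+# hop limit of 2 so that workloads running in containers on the node can still
+# reach the instance metadata service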
+variable "metadata_options" {
+ description = "Customize the metadata options for the instance"
+ type = map(string)
+ default = {
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ http_put_response_hop_limit = 2
+ }
+}
+
################################################################################
# Autoscaling group
################################################################################
variable "name" {
- description = "Name used across the resources created"
+ description = "Name of the Self managed Node Group"
type = string
default = ""
}
@@ -78,12 +266,6 @@ variable "use_name_prefix" {
default = true
}
-variable "launch_template_name" {
- description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
- type = string
- default = null
-}
-
variable "launch_template_version" {
description = "Launch template version. Can be version number, `$Latest`, or `$Default`"
type = string
@@ -259,193 +441,11 @@ variable "delete_timeout" {
}
variable "propagate_tags" {
- description = "A list of tag blocks. Each element should have keys named key, value, and propagate_at_launch"
+ description = "A list of tag blocks. Each element should have keys named `key`, `value`, and `propagate_at_launch`"
type = list(map(string))
default = []
}
-################################################################################
-# Launch template
-################################################################################
-
-variable "create_launch_template" {
- description = "Determines whether to create launch template or not"
- type = bool
- default = true
-}
-
-variable "launch_template_use_name_prefix" {
- description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
- type = bool
- default = true
-}
-
-variable "description" {
- description = "Description of the launch template"
- type = string
- default = null
-}
-
-variable "default_version" {
- description = "Default Version of the launch template"
- type = string
- default = null
-}
-
-variable "update_default_version" {
- description = "Whether to update Default Version each update. Conflicts with `default_version`"
- type = string
- default = null
-}
-
-variable "disable_api_termination" {
- description = "If true, enables EC2 instance termination protection"
- type = bool
- default = null
-}
-
-variable "instance_initiated_shutdown_behavior" {
- description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
- type = string
- default = null
-}
-
-variable "kernel_id" {
- description = "The kernel ID"
- type = string
- default = null
-}
-
-variable "ram_disk_id" {
- description = "The ID of the ram disk"
- type = string
- default = null
-}
-
-variable "block_device_mappings" {
- description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
- type = any
- default = {}
-}
-
-variable "capacity_reservation_specification" {
- description = "Targeting for EC2 capacity reservations"
- type = any
- default = null
-}
-
-variable "cpu_options" {
- description = "The CPU options for the instance"
- type = map(string)
- default = null
-}
-
-variable "credit_specification" {
- description = "Customize the credit specification of the instance"
- type = map(string)
- default = null
-}
-
-variable "elastic_gpu_specifications" {
- description = "The elastic GPU to attach to the instance"
- type = map(string)
- default = null
-}
-
-variable "elastic_inference_accelerator" {
- description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance"
- type = map(string)
- default = null
-}
-
-variable "enclave_options" {
- description = "Enable Nitro Enclaves on launched instances"
- type = map(string)
- default = null
-}
-
-variable "hibernation_options" {
- description = "The hibernation options for the instance"
- type = map(string)
- default = null
-}
-
-variable "instance_market_options" {
- description = "The market (purchasing) option for the instance"
- type = any
- default = null
-}
-
-variable "license_specifications" {
- description = "A list of license specifications to associate with"
- type = map(string)
- default = null
-}
-
-variable "network_interfaces" {
- description = "Customize network interfaces to be attached at instance boot time"
- type = list(any)
- default = []
-}
-
-variable "placement" {
- description = "The placement of the instance"
- type = map(string)
- default = null
-}
-
-variable "ebs_optimized" {
- description = "If true, the launched EC2 instance will be EBS-optimized"
- type = bool
- default = null
-}
-
-variable "ami_id" {
- description = "The AMI from which to launch the instance"
- type = string
- default = ""
-}
-
-variable "cluster_version" {
- description = "Kubernetes cluster version - used to lookup default AMI ID if one is not provided"
- type = string
- default = null
-}
-
-variable "instance_type" {
- description = "The type of the instance to launch"
- type = string
- default = ""
-}
-
-variable "key_name" {
- description = "The key name that should be used for the instance"
- type = string
- default = null
-}
-
-variable "vpc_security_group_ids" {
- description = "A list of security group IDs to associate"
- type = list(string)
- default = []
-}
-
-variable "enable_monitoring" {
- description = "Enables/disables detailed monitoring"
- type = bool
- default = null
-}
-
-variable "metadata_options" {
- description = "Customize the metadata options for the instance"
- type = map(string)
- default = {
- http_endpoint = "enabled"
- http_tokens = "required"
- http_put_response_hop_limit = 2
- }
-}
-
################################################################################
# Autoscaling group schedule
################################################################################
@@ -467,7 +467,7 @@ variable "schedules" {
################################################################################
variable "create_security_group" {
- description = "Whether to create a security group"
+ description = "Determines whether to create a security group"
type = bool
default = true
}
@@ -485,7 +485,7 @@ variable "security_group_use_name_prefix" {
}
variable "security_group_description" {
- description = "Description for the security group"
+ description = "Description for the security group created"
type = string
default = "EKS worker security group"
}
@@ -503,7 +503,7 @@ variable "security_group_rules" {
}
variable "cluster_security_group_id" {
- description = "Cluster control plain security group ID"
+ description = "Cluster control plane security group ID"
type = string
default = null
}
@@ -525,7 +525,7 @@ variable "create_iam_instance_profile" {
}
variable "iam_instance_profile_arn" {
- description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group"
+ description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false`"
type = string
default = null
}
diff --git a/node_groups.tf b/node_groups.tf
index b33a52b265..ad47a2fa84 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -161,6 +161,7 @@ module "fargate_profile" {
iam_role_name = try(each.value.iam_role_name, var.fargate_profile_defaults.iam_role_name, null)
iam_role_use_name_prefix = try(each.value.iam_role_use_name_prefix, var.fargate_profile_defaults.iam_role_use_name_prefix, true)
iam_role_path = try(each.value.iam_role_path, var.fargate_profile_defaults.iam_role_path, null)
+ iam_role_description = try(each.value.iam_role_description, var.fargate_profile_defaults.iam_role_description, "Fargate profile IAM role")
iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.fargate_profile_defaults.iam_role_permissions_boundary, null)
iam_role_tags = try(each.value.iam_role_tags, var.fargate_profile_defaults.iam_role_tags, {})
iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, [])
@@ -223,14 +224,14 @@ module "eks_managed_node_group" {
launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null)
description = try(each.value.description, var.eks_managed_node_group_defaults.description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group")
- ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null)
- key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
- vpc_security_group_ids = compact(concat([try(aws_security_group.node[0].id, "")], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])))
- default_version = try(each.value.default_version, var.eks_managed_node_group_defaults.default_version, null)
- update_default_version = try(each.value.update_default_version, var.eks_managed_node_group_defaults.update_default_version, null)
- disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null)
- kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null)
- ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null)
+ ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null)
+ key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
+ vpc_security_group_ids = compact(concat([try(aws_security_group.node[0].id, "")], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])))
+ launch_template_default_version = try(each.value.launch_template_default_version, var.eks_managed_node_group_defaults.launch_template_default_version, null)
+ update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.eks_managed_node_group_defaults.update_launch_template_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null)
+ kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null)
block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, {})
capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.eks_managed_node_group_defaults.capacity_reservation_specification, null)
@@ -242,7 +243,7 @@ module "eks_managed_node_group" {
instance_market_options = try(each.value.instance_market_options, var.eks_managed_node_group_defaults.instance_market_options, null)
license_specifications = try(each.value.license_specifications, var.eks_managed_node_group_defaults.license_specifications, null)
metadata_options = try(each.value.metadata_options, var.eks_managed_node_group_defaults.metadata_options, local.metadata_options)
- enable_monitoring = try(each.value.enable_monitoring, var.eks_managed_node_group_defaults.enable_monitoring, null)
+ enable_monitoring = try(each.value.enable_monitoring, var.eks_managed_node_group_defaults.enable_monitoring, true)
network_interfaces = try(each.value.network_interfaces, var.eks_managed_node_group_defaults.network_interfaces, [])
placement = try(each.value.placement, var.eks_managed_node_group_defaults.placement, null)
@@ -342,14 +343,14 @@ module "self_managed_node_group" {
instance_type = try(each.value.instance_type, var.self_managed_node_group_defaults.instance_type, "m6i.large")
key_name = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null)
- vpc_security_group_ids = compact(concat([try(aws_security_group.node[0].id, "")], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])))
- cluster_security_group_id = local.cluster_security_group_id
- default_version = try(each.value.default_version, var.self_managed_node_group_defaults.default_version, null)
- update_default_version = try(each.value.update_default_version, var.self_managed_node_group_defaults.update_default_version, null)
- disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null)
- instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null)
- kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null)
- ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null)
+ vpc_security_group_ids = compact(concat([try(aws_security_group.node[0].id, "")], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])))
+ cluster_security_group_id = local.cluster_security_group_id
+ launch_template_default_version = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null)
+ update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, null)
+ disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null)
+ instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null)
+ kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null)
+ ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null)
block_device_mappings = try(each.value.block_device_mappings, var.self_managed_node_group_defaults.block_device_mappings, [])
capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_managed_node_group_defaults.capacity_reservation_specification, null)
@@ -362,7 +363,7 @@ module "self_managed_node_group" {
instance_market_options = try(each.value.instance_market_options, var.self_managed_node_group_defaults.instance_market_options, null)
license_specifications = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, null)
metadata_options = try(each.value.metadata_options, var.self_managed_node_group_defaults.metadata_options, local.metadata_options)
- enable_monitoring = try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, null)
+ enable_monitoring = try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, true)
network_interfaces = try(each.value.network_interfaces, var.self_managed_node_group_defaults.network_interfaces, [])
placement = try(each.value.placement, var.self_managed_node_group_defaults.placement, null)
diff --git a/variables.tf b/variables.tf
index c572f6b28a..f5df58f58e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,15 +1,15 @@
-variable "tags" {
- description = "A map of tags to add to all resources"
- type = map(string)
- default = {}
-}
-
variable "create" {
description = "Controls if EKS resources should be created (affects nearly all resources)"
type = bool
default = true
}
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
+
################################################################################
# Cluster
################################################################################
@@ -224,7 +224,7 @@ variable "openid_connect_audiences" {
################################################################################
variable "create_iam_role" {
- description = "Determines whether a cluster IAM role is created or to use an existing IAM role"
+ description = "Determines whether a an IAM role is created or to use an existing IAM role"
type = bool
default = true
}
@@ -236,13 +236,13 @@ variable "iam_role_arn" {
}
variable "iam_role_name" {
- description = "Name to use on cluster role created"
+ description = "Name to use on IAM role created"
type = string
default = null
}
variable "iam_role_use_name_prefix" {
- description = "Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix"
+ description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
type = string
default = true
}
@@ -253,14 +253,26 @@ variable "iam_role_path" {
default = null
}
+variable "iam_role_description" {
+ description = "Description of the role"
+ type = string
+ default = null
+}
+
variable "iam_role_permissions_boundary" {
- description = "ARN of the policy that is used to set the permissions boundary for the cluster role"
+ description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
type = string
default = null
}
+variable "iam_role_additional_policies" {
+ description = "Additional policies to be added to the IAM role"
+ type = list(string)
+ default = []
+}
+
variable "iam_role_tags" {
- description = "A map of additional tags to add to the cluster IAM role created"
+ description = "A map of additional tags to add to the IAM role created"
type = map(string)
default = {}
}
From c5976584102b62f3ac20dbeaff3bd86be0be93d4 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 11:36:10 -0500
Subject: [PATCH 51/83] Update modules/_user_data/variables.tf
Co-authored-by: Anton Babenko
---
modules/_user_data/variables.tf | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
index 66ae91cc4c..f819580eee 100644
--- a/modules/_user_data/variables.tf
+++ b/modules/_user_data/variables.tf
@@ -1,5 +1,5 @@
variable "create" {
- description = "Determines whether to create EKS managed node group or not"
+ description = "Determines whether to create user-data or not"
type = bool
default = true
}
From d7e11e0d9479b151b43c11581bd583d71bd7b345 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 14:31:32 -0500
Subject: [PATCH 52/83] chore: update documentation, incorporate feedback
suggestions
---
.github/images/security_groups.svg | 1 +
README.md | 50 +++++++++++++++++++++++
examples/complete/main.tf | 34 +++++++--------
examples/eks_managed_node_group/main.tf | 39 +++++++++---------
examples/irsa_autoscale_refresh/charts.tf | 4 +-
examples/irsa_autoscale_refresh/main.tf | 22 +++++-----
examples/self_managed_node_group/main.tf | 17 ++++----
main.tf | 2 +-
modules/_user_data/README.md | 2 +-
modules/eks-managed-node-group/README.md | 34 ---------------
modules/eks-managed-node-group/main.tf | 2 +-
node_groups.tf | 9 ++--
variables.tf | 6 +++
13 files changed, 123 insertions(+), 99 deletions(-)
create mode 100644 .github/images/security_groups.svg
diff --git a/.github/images/security_groups.svg b/.github/images/security_groups.svg
new file mode 100644
index 0000000000..6b120e98ba
--- /dev/null
+++ b/.github/images/security_groups.svg
@@ -0,0 +1 @@
+
diff --git a/README.md b/README.md
index 72ebb66843..4b7042697d 100644
--- a/README.md
+++ b/README.md
@@ -168,6 +168,55 @@ module "eks" {
}
```
+## Module Design Considerations
+
+### General Notes
+
+While the module is designed to be flexible and support as many use cases and configurations as possible, there is a limit to what first class support can be provided without over-burdening the module with complexity. Below is a list of general notes on the design intent captured by this module, which hopefully explains some of the decisions that are, or will be, made in terms of what is added/supported natively by the module:
+
+- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constructs and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also very much natively supported by this module. This module does not attempt to prevent the usage of Windows based nodes; however, it is up to users to put in the additional effort required to operate Windows based nodes when using the module. Users can refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What does this mean:
+ - AWS EKS Managed Node Groups default to `linux` as the `platform`, but `bottlerocket` is also supported by AWS (`windows` is not supported by AWS EKS Managed Node Groups)
+ - AWS Self Managed Node Groups also default to `linux` and the default AMI used is the latest AMI for the selected Kubernetes version. If you wish to use a different OS or AMI then you will need to opt in to the necessary configurations to ensure the correct AMI is used in conjunction with the necessary user data to ensure the nodes are launched and joined to your cluster successfully.
+- AWS EKS Managed Node Groups are currently the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node Groups provide a better, more "managed service" user experience and therefore take precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to roll out additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting additional feature support for AWS EKS Managed Node Groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
+- Due to the plethora of tooling and different manners of configuring your cluster, cluster configuration is intentionally left out of the module in order to simplify the module for a broader user base. Previous module versions provided support for managing the aws-auth configmap via the Kubernetes Terraform provider using the now deprecated aws-iam-authenticator; these are no longer included in the module. This module strictly focuses on the infrastructure resources to provision an EKS cluster as well as any supporting AWS resources - how the internals of the cluster are configured and managed is up to users and is outside the scope of this module. An output attribute, `aws_auth_configmap_yaml`, is provided to help bridge this transition; a minimal sketch of its use is shown below. Please see the various examples provided where this attribute is used to ensure that self managed node groups or external node groups have their IAM roles appropriately mapped to the aws-auth configmap. How users elect to manage the aws-auth configmap is left up to their choosing.
+
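+For illustration, one minimal way to consume the `aws_auth_configmap_yaml` output is sketched below. The `local_file` resource (from the `hashicorp/local` provider) is an assumption of this sketch, not something the module manages; the rendered file can then be applied out of band, e.g. `kubectl apply -f aws-auth.yaml`:
+
+```hcl
+# Render the aws-auth configmap emitted by the module to a local file
+resource "local_file" "aws_auth" {
+  content  = module.eks.aws_auth_configmap_yaml
+  filename = "${path.module}/aws-auth.yaml"
+}
+```
+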
+### User Data & Bootstrapping
+
+There is a multitude of possible configurations for how module users require their user data to be configured. To better support the various combinations - from simple, out of the box support provided by the module to full customization of the user data using a user-provided template - the user data has been abstracted out to its own module. Users can see the various methods of using and providing user data through the [user data examples](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) as well as more detailed information on the design and possible configurations via the [user data module itself](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data)
+
+In general (tl;dr):
+- AWS EKS Managed Node Groups
+ - `linux` platform (default) -> user data is prepended to the AWS provided bootstrap user data (bash/shell script) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to bootstrap nodes to join the cluster
+ - `bottlerocket` platform -> user data is merged with the AWS provided bootstrap user data (TOML file) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to bootstrap nodes to join the cluster
+- Self Managed Node Groups
+ - `linux` platform (default) -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template
+ - `bottlerocket` platform -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template
+ - `windows` platform -> the user data template (powershell/PS1 script) provided by the module is used as the default; users are able to provide their own user data template
+
+Module provided default templates can be found under the [templates directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/templates)
+
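+As a minimal sketch of where these hooks attach (the node group name and shell snippet below are illustrative only, not module defaults):
+
+```hcl
+eks_managed_node_groups = {
+  example = {
+    platform = "linux"
+    # Prepended to the AWS provided bootstrap user data (bash/shell script)
+    pre_bootstrap_user_data = <<-EOT
+      echo "running illustrative pre-bootstrap steps"
+    EOT
+  }
+}
+```
+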
+### Security Groups
+
+- Cluster Security Group
+ - This module by default creates a cluster security group ("additional" security group when viewed from the console) in addition to the default security group created by the AWS EKS service. This "additional" security group allows users to customize inbound and outbound rules via the module as they see fit
+ - The default inbound/outbound rules provided by the module are derived from the [AWS minimum recommendations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in addition to NTP and HTTPS public internet egress rules (without these, they show up in VPC flow logs as rejects - they are used for clock sync and downloading necessary packages/updates)
+ - The minimum inbound/outbound rules are provided for cluster and node creation to succeed without errors, but users will most likely need to add the necessary port and protocol for node-to-node communication (this is user specific based on how nodes are configured to communicate across the cluster)
+ - Users have the ability to opt out of the security group creation and instead provide their own externally created security group if so desired
+ - The security group that is created is designed to handle the bare minimum communication necessary between the control plane and the nodes, as well as any external egress to allow the cluster to successfully launch without error
+ - Users also have the option to supply additional, externally created security groups to the cluster as well via the `cluster_additional_security_group_ids` variable
+
+- Node Group Security Group(s)
+ - Each node group (EKS Managed Node Group and Self Managed Node Group) by default creates its own security group. By default, this security group does not contain any additional security group rules. It is merely an "empty container" that offers users the ability to opt into any additional inbound or outbound rules as necessary (see the sketch following the diagram below)
+ - Users also have the option to supply their own, and/or additional, externally created security group(s) to the node group as well via the `vpc_security_group_ids` variable
+
+The security groups created by this module are depicted in the image shown below along with their default inbound/outbound rules:
+
+
+<img src=".github/images/security_groups.svg" alt="Security groups created by the module and their default rules" width="100%">
+
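+Since the node group security groups are created without additional rules by default, a common follow-up is to opt into node-to-node traffic via the node group `security_group_rules` input. A minimal sketch (the rule key and values are illustrative; rule attributes follow the `aws_security_group_rule` resource arguments):
+
+```hcl
+security_group_rules = {
+  ingress_self_all = {
+    description = "Node to node all ports/protocols"
+    type        = "ingress"
+    protocol    = "-1"
+    from_port   = 0
+    to_port     = 0
+    self        = true
+  }
+}
+```
+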
## Notes
- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. See the [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh) example provided.
@@ -325,6 +374,7 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
|------|-------------|------|---------|:--------:|
| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
+| [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no |
| [cluster\_additional\_security\_group\_rules](#input\_cluster\_additional\_security\_group\_rules) | List of additional security group rules to add to the cluster security group created | `map(any)` | `{}` | no |
| [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `["audit", "api", "authenticator"]` | no |
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 77448ed90a..e32aaef930 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -48,9 +48,9 @@ module "eks" {
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
- launch_template_default_version = true
- vpc_security_group_ids = [aws_security_group.additional.id]
- iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ update_launch_template_default_version = true
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
self_managed_node_groups = {
@@ -120,6 +120,7 @@ module "eks" {
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
+
taints = {
dedicated = {
key = "dedicated"
@@ -127,10 +128,11 @@ module "eks" {
effect = "NO_SCHEDULE"
}
}
- # TODO - this is throwing an error
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
+
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
+
tags = {
ExtraTag = "example"
}
@@ -200,10 +202,10 @@ module "self_managed_node_group" {
module.eks.cluster_security_group_id,
]
- create_launch_template = true
- launch_template_name = "separate-self-mng"
- launch_template_default_version = true
- instance_type = "m5.large"
+ create_launch_template = true
+ launch_template_name = "separate-self-mng"
+ update_launch_template_default_version = true
+ instance_type = "m5.large"
tags = merge(local.tags, { Separate = "self-managed-node-group" })
}
@@ -266,23 +268,23 @@ locals {
kind = "Config"
current-context = "terraform"
clusters = [{
- name = "${module.eks.cluster_id}"
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
- server = "${module.eks.cluster_endpoint}"
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
- cluster = "${module.eks.cluster_id}"
+ cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
- token = "${data.aws_eks_cluster_auth.this.token}"
+ token = data.aws_eks_cluster_auth.this.token
}
}]
})
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 9056c930d3..0b58c0dabc 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -69,9 +69,9 @@ module "eks" {
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
- launch_template_default_version = true
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
+ update_launch_template_default_version = true
# this will get added to what AWS provides
bootstrap_extra_args = <<-EOT
@@ -87,9 +87,9 @@ module "eks" {
ami_id = "ami-0ff61e0bcfc81dc94"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
- launch_template_default_version = true
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
+ update_launch_template_default_version = true
# use module user data template to boostrap
enable_bootstrap_user_data = true
@@ -171,16 +171,15 @@ module "eks" {
}
]
- # TODO - this is throwing an error
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
- create_launch_template = true
- launch_template_name = "eks-managed-ex"
- launch_template_use_name_prefix = true
- description = "EKS managed node group example launch template"
- launch_template_default_version = true
+ create_launch_template = true
+ launch_template_name = "eks-managed-ex"
+ launch_template_use_name_prefix = true
+ description = "EKS managed node group example launch template"
+ update_launch_template_default_version = true
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
@@ -270,23 +269,23 @@ locals {
kind = "Config"
current-context = "terraform"
clusters = [{
- name = "${module.eks.cluster_id}"
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
- server = "${module.eks.cluster_endpoint}"
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
- cluster = "${module.eks.cluster_id}"
+ cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
- token = "${data.aws_eks_cluster_auth.this.token}"
+ token = data.aws_eks_cluster_auth.this.token
}
}]
})
diff --git a/examples/irsa_autoscale_refresh/charts.tf b/examples/irsa_autoscale_refresh/charts.tf
index d997565cfd..6a98c1a9bf 100644
--- a/examples/irsa_autoscale_refresh/charts.tf
+++ b/examples/irsa_autoscale_refresh/charts.tf
@@ -52,7 +52,7 @@ resource "helm_release" "cluster_autoscaler" {
}
depends_on = [
- module.eks
+ module.eks.cluster_id
]
}
@@ -166,7 +166,7 @@ resource "helm_release" "aws_node_termination_handler" {
}
depends_on = [
- module.eks
+ module.eks.cluster_id
]
}
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
index 69b3a2cc3f..687db41385 100644
--- a/examples/irsa_autoscale_refresh/main.tf
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -43,10 +43,10 @@ module "eks" {
max_size = 5
desired_size = 1
- instance_types = ["m5.large", "m5n.large", "m5zn.large", "m6i.large", ]
- create_launch_template = true
- launch_template_name = "refresh"
- launch_template_default_version = true
+ instance_type = "m5.large"
+ create_launch_template = true
+ launch_template_name = "refresh"
+ update_launch_template_default_version = true
instance_refresh = {
strategy = "Rolling"
@@ -86,23 +86,23 @@ locals {
kind = "Config"
current-context = "terraform"
clusters = [{
- name = "${module.eks.cluster_id}"
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
- server = "${module.eks.cluster_endpoint}"
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
- cluster = "${module.eks.cluster_id}"
+ cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
- token = "${data.aws_eks_cluster_auth.this.token}"
+ token = data.aws_eks_cluster_auth.this.token
}
}]
})
@@ -159,7 +159,5 @@ module "vpc" {
"kubernetes.io/role/internal-elb" = 1
}
- tags = merge(local.tags,
- { "kubernetes.io/cluster/${local.name}" = "shared" }
- )
+ tags = local.tags
}
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 3e31feba2a..7646d24411 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -117,10 +117,9 @@ module "eks" {
GithubOrg = "terraform-aws-modules"
}
- # TODO - this is throwing an error
- # update_config = {
- # max_unavailable_percentage = 50 # or set `max_unavailable`
- # }
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
create_launch_template = true
launch_template_name = "self-managed-ex"
@@ -222,23 +221,23 @@ locals {
kind = "Config"
current-context = "terraform"
clusters = [{
- name = "${module.eks.cluster_id}"
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = "${module.eks.cluster_certificate_authority_data}"
- server = "${module.eks.cluster_endpoint}"
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
- cluster = "${module.eks.cluster_id}"
+ cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
- token = "${data.aws_eks_cluster_auth.this.token}"
+ token = data.aws_eks_cluster_auth.this.token
}
}]
})
diff --git a/main.tf b/main.tf
index 2c224afced..7ea9c5a479 100644
--- a/main.tf
+++ b/main.tf
@@ -13,7 +13,7 @@ resource "aws_eks_cluster" "this" {
enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
- security_group_ids = [local.cluster_security_group_id]
+ security_group_ids = distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id]))
subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
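
A minimal sketch of how the new `cluster_additional_security_group_ids` input might be consumed from a calling configuration; the security group ID and network values below are illustrative placeholders, not part of the patch:

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "my-cluster"
  cluster_version = "1.21"

  vpc_id     = "vpc-1234556abcdef"
  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]

  # Externally created security group(s) appended to the control plane ENIs;
  # the distinct(concat(...)) expression above de-duplicates these against
  # the module-managed cluster security group
  cluster_additional_security_group_ids = ["sg-0123456789abcdef0"]
}
```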
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 1dcba03964..80ea888cc8 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -116,7 +116,7 @@ No modules.
| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
-| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
+| [create](#input\_create) | Determines whether to create user-data or not | `bool` | `true` | no |
| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not | `bool` | `true` | no |
| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index c789c5ad7b..a5d5c01e0d 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -12,40 +12,6 @@ $ terraform plan
$ terraform apply
```
-# TODO - Update Notes vvv
-
-Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
-# User Data Configurations
-
-- https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
-> An important note is that user data must in MIME multi-part archive format,
-> as by default, EKS will merge the bootstrapping command required for nodes to join the
-> cluster with your user data. If you use a custom AMI in your launch template,
-> this merging will (__NOT__) happen and you are responsible for nodes joining the cluster.
-> See [docs for more details](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
-
-- https://aws.amazon.com/blogs/containers/introducing-launch-template-and-custom-ami-support-in-amazon-eks-managed-node-groups/
-
-a. Use EKS provided AMI which merges its user data with the user data users provide in the launch template
- i. No additional user data
- ii. Add additional user data
-b. Use custom AMI which MUST bring its own user data that bootstraps the node
- i. Bring your own user data (whole shebang)
- ii. Use "default" template provided by module here and (optionally) any additional user data
-
-TODO - need to try these out in order and verify and document what happens with user data.
-
-
-## From LT
-This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx) there are several more options one could set but you probably dont need to modify them you can take the default and add your custom AMI and/or custom tags
-#
-Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
-
-If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
- #
-(optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151) then the default user-data for bootstrapping a cluster is merged in the copy.
-
## Requirements
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index f32e8e8a97..85244506dd 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -310,7 +310,7 @@ resource "aws_eks_node_group" "this" {
}
dynamic "update_config" {
- for_each = var.update_config
+ for_each = length(var.update_config) > 0 ? [var.update_config] : []
content {
max_unavailable_percentage = try(update_config.value.max_unavailable_percentage, null)
max_unavailable = try(update_config.value.max_unavailable, null)
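
The `for_each` change above is the substance of the fix: iterating `var.update_config` (a map) directly emits one `update_config` block per key, and each block's `update_config.value` is then a bare value rather than the whole map, so the `try(...)` lookups fall back to `null` — likely the source of the error noted in the examples. Wrapping the non-empty map in a single-element list yields exactly one block whose `value` is the full map. A standalone illustration of the two behaviors (values hypothetical):

```hcl
locals {
  update_config = { max_unavailable_percentage = 50 }

  # Direct iteration: one element per map key, whose value is the bare
  # number 50 - attribute lookups on it fail, hence the error
  per_key_values = [for k, v in local.update_config : v]

  # The fix: a single-element list carrying the whole map, so
  # update_config.value.max_unavailable_percentage resolves as intended
  single_block = length(local.update_config) > 0 ? [local.update_config] : []
}

output "blocks_emitted" {
  value = length(local.single_block) # 1 => exactly one update_config block
}
```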
diff --git a/node_groups.tf b/node_groups.tf
index ad47a2fa84..12a716c3c1 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -112,7 +112,10 @@ resource "aws_security_group" "node" {
tags = merge(
var.tags,
- { "Name" = local.node_sg_name },
+ {
+ "Name" = local.node_sg_name
+ "kubernetes.io/cluster/${var.cluster_name}" = "owned"
+ },
var.node_security_group_tags
)
}
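
Because `var.node_security_group_tags` remains the last argument to `merge()`, user-supplied tags still take precedence over the module-set `Name` and ownership tags; `merge()` gives later arguments priority. A small standalone illustration of that ordering (names hypothetical):

```hcl
locals {
  module_tags = {
    "Name"                             = "my-cluster-node"
    "kubernetes.io/cluster/my-cluster" = "owned"
  }

  user_tags = { "kubernetes.io/cluster/my-cluster" = "shared" }

  # Later arguments win, mirroring the merge() order in node_groups.tf
  effective = merge(local.module_tags, local.user_tags)
}

output "ownership_tag" {
  value = local.effective["kubernetes.io/cluster/my-cluster"] # "shared"
}
```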
@@ -228,7 +231,7 @@ module "eks_managed_node_group" {
key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
vpc_security_group_ids = compact(concat([try(aws_security_group.node[0].id, "")], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])))
launch_template_default_version = try(each.value.launch_template_default_version, var.eks_managed_node_group_defaults.launch_template_default_version, null)
- update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.eks_managed_node_group_defaults.update_launch_template_default_version, null)
+ update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.eks_managed_node_group_defaults.update_launch_template_default_version, true)
disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null)
kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null)
ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null)
@@ -346,7 +349,7 @@ module "self_managed_node_group" {
vpc_security_group_ids = compact(concat([try(aws_security_group.node[0].id, "")], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, [])))
cluster_security_group_id = local.cluster_security_group_id
launch_template_default_version = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null)
- update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, null)
+ update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true)
disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null)
instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null)
kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null)
diff --git a/variables.tf b/variables.tf
index f5df58f58e..43b26aea90 100644
--- a/variables.tf
+++ b/variables.tf
@@ -32,6 +32,12 @@ variable "cluster_enabled_log_types" {
default = ["audit", "api", "authenticator"]
}
+variable "cluster_additional_security_group_ids" {
+ description = "List of additional, externally created security group IDs to attach to the cluster control plane"
+ type = list(string)
+ default = []
+}
+
variable "subnet_ids" {
description = "A list of subnet IDs where the EKS cluster (ENIs) will be provisioned along with the nodes/node groups. Node groups can be deployed within a different set of subnet IDs from within the node group configuration"
type = list(string)
From 469beb9cb4f0902906e13603c63eb29ac28161a3 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 14:46:22 -0500
Subject: [PATCH 53/83] chore: update defaults on updating launch template
default version
---
examples/complete/main.tf | 12 +++++-------
examples/eks_managed_node_group/main.tf | 19 ++++++++-----------
examples/irsa_autoscale_refresh/main.tf | 7 +++----
modules/self-managed-node-group/README.md | 2 +-
modules/self-managed-node-group/variables.tf | 4 ++--
5 files changed, 19 insertions(+), 25 deletions(-)
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index e32aaef930..a46c57c5b8 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -48,9 +48,8 @@ module "eks" {
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
- update_launch_template_default_version = true
- vpc_security_group_ids = [aws_security_group.additional.id]
- iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
}
self_managed_node_groups = {
@@ -202,10 +201,9 @@ module "self_managed_node_group" {
module.eks.cluster_security_group_id,
]
- create_launch_template = true
- launch_template_name = "separate-self-mng"
- update_launch_template_default_version = true
- instance_type = "m5.large"
+ create_launch_template = true
+ launch_template_name = "separate-self-mng"
+ instance_type = "m5.large"
tags = merge(local.tags, { Separate = "self-managed-node-group" })
}
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 0b58c0dabc..66e33350b4 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -69,9 +69,8 @@ module "eks" {
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
- update_launch_template_default_version = true
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
# this will get added to what AWS provides
bootstrap_extra_args = <<-EOT
@@ -87,9 +86,8 @@ module "eks" {
ami_id = "ami-0ff61e0bcfc81dc94"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
- update_launch_template_default_version = true
+ create_launch_template = true
+ launch_template_name = "bottlerocket-custom"
      # use module user data template to bootstrap
enable_bootstrap_user_data = true
@@ -175,11 +173,10 @@ module "eks" {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
- create_launch_template = true
- launch_template_name = "eks-managed-ex"
- launch_template_use_name_prefix = true
- description = "EKS managed node group example launch template"
- update_launch_template_default_version = true
+ create_launch_template = true
+ launch_template_name = "eks-managed-ex"
+ launch_template_use_name_prefix = true
+ description = "EKS managed node group example launch template"
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
index 687db41385..4f9c747dc1 100644
--- a/examples/irsa_autoscale_refresh/main.tf
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -43,10 +43,9 @@ module "eks" {
max_size = 5
desired_size = 1
- instance_type = "m5.large"
- create_launch_template = true
- launch_template_name = "refresh"
- update_launch_template_default_version = true
+ instance_type = "m5.large"
+ create_launch_template = true
+ launch_template_name = "refresh"
instance_refresh = {
strategy = "Rolling"
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 7808290c5a..b40238f844 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -136,7 +136,7 @@ $ terraform apply
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
| [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing | `list(string)` | `[]` | no |
| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `null` | no |
-| [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update Default Version each update. Conflicts with `launch_template_default_version` | `string` | `null` | no |
+| [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update Default Version each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no |
| [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
| [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
| [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 85d816e65f..7e34fff950 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -98,8 +98,8 @@ variable "launch_template_default_version" {
variable "update_launch_template_default_version" {
description = "Whether to update Default Version each update. Conflicts with `launch_template_default_version`"
- type = string
- default = null
+ type = bool
+ default = true
}
variable "disable_api_termination" {
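
With the default flipped to `true`, launch template changes now roll the template's default version forward automatically. Callers that manage versions themselves would opt out explicitly; a rough sketch (other required inputs elided, names illustrative):

```hcl
module "self_managed_node_group" {
  source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"

  name         = "pinned-lt"
  cluster_name = "my-cluster"

  create_launch_template = true
  launch_template_name   = "pinned-lt"

  # Opt out of rolling the launch template's default version forward on
  # every change (now the module default); `launch_template_default_version`
  # is the conflicting alternative for pinning an explicit version
  update_launch_template_default_version = false
}
```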
From 901dcaac711ff90cda1de21f64a2e9ff8db16868 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 15:09:00 -0500
Subject: [PATCH 54/83] chore: update sub-module documentation
---
modules/_user_data/README.md | 12 ++-----
modules/eks-managed-node-group/README.md | 43 +++++++++++++++++++----
modules/fargate-profile/README.md | 31 ++++++++--------
modules/self-managed-node-group/README.md | 35 ++++++++++++++----
4 files changed, 85 insertions(+), 36 deletions(-)
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 80ea888cc8..991facc6a7 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -2,6 +2,8 @@
Configuration in this directory renderes the appropriate user data for the given inputs. There are a number of different ways that user data can be utilized and this internal module is designed to aid in making that flexibility possible as well as providing a means for out of bands testing and validation.
+See the [`examples/user_data/` directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) for various examples of using the module.
+
## Combinations
At a high level, AWS EKS users have two methods for launching nodes within this EKS module (ignoring Fargate profiles):
@@ -74,16 +76,6 @@ The rough flow of logic that is encapsulated within the `_user_data` internal mo
-## Usage
-
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
-````
-
## Requirements
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index a5d5c01e0d..b9de533aa5 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -4,12 +4,43 @@ Configuration in this directory creates an EKS Managed Node Group along with an
## Usage
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
+```hcl
+module "eks_managed_node_group" {
+ source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
+
+ name = "separate-eks-mng"
+ cluster_name = "my-cluster"
+ cluster_version = "1.21"
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = {
+ dedicated = {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ }
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
```
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
index 02a06b7730..8a51f317a8 100644
--- a/modules/fargate-profile/README.md
+++ b/modules/fargate-profile/README.md
@@ -4,22 +4,25 @@ Configuration in this directory creates a Fargate EKS Profile
## Usage
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
+```hcl
+module "fargate_profile" {
+ source = "terraform-aws-modules/eks/aws//modules/fargate-profile"
+
+ name = "separate-fargate-profile"
+ cluster_name = "my-cluster"
+
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+ selectors = [{
+ namespace = "kube-system"
+ }]
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
```
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| name | Fargate profile name | `string` | Auto generated in the following format `[cluster_name]-fargate-[fargate_profile_map_key]`| no |
-| selectors | A list of Kubernetes selectors. See examples/fargate/main.tf for example format. | `[]` | no |
-| subnets | List of subnet IDs. Will replace the root module subnets. | `list(string)` | `var.subnets` | no |
-| timeouts | A map of timeouts for create/delete operations. | `map(string)` | Provider default behavior | no |
-| tags | Key-value map of resource tags. Will be merged with root module tags. | `map(string)` | `var.tags` | no |
-
## Requirements
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index b40238f844..2eca9cd9c3 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -4,12 +4,35 @@ Configuration in this directory creates a Self Managed Node Group (AutoScaling G
## Usage
-To run this example you need to execute:
-
-```bash
-$ terraform init
-$ terraform plan
-$ terraform apply
+```hcl
+module "self_managed_node_group" {
+ source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"
+
+ name = "separate-self-mng"
+ cluster_name = "my-cluster"
+ cluster_version = "1.21"
+ cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+ cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+ vpc_security_group_ids = [
+ # cluster_security_group_id,
+ ]
+
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ create_launch_template = true
+ launch_template_name = "separate-self-mng"
+ instance_type = "m5.large"
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
```
From c48704072a15361b25f5e507a21f2cfc8cc2538d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 15:28:55 -0500
Subject: [PATCH 55/83] chore: correct spelling/grammatical mistakes
---
README.md | 10 +++++-----
examples/eks_managed_node_group/README.md | 2 +-
examples/self_managed_node_group/README.md | 2 +-
modules/_user_data/README.md | 16 ++++++++--------
4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/README.md b/README.md
index 4b7042697d..e02c4ef835 100644
--- a/README.md
+++ b/README.md
@@ -174,10 +174,10 @@ module "eks" {
While the module is designed to be flexible and support as many use cases and configurations as possible, there is a limit to what first class support can be provided without over-burdening the complexity of the module. Below is a list of general notes on the design intent captured by this module which hopefully explains some of the decisions that are, or will be made, in terms of what is added/supported natively by the module:
-- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constrcuts and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also very much natively supported by this module. This module does not make any attempt to NOT support Windows, as in preventing the usage of Windows based nodes, however it is up to users to put in additional effort in order to operate Winodws based nodes when using the module. User can refere to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What does this mean:
+- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constructs and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also very much fully supported by this module. This module does not make any attempt to NOT support Windows, as in preventing the usage of Windows based nodes, however it is up to users to put in additional effort in order to operate Windows based nodes when using the module. Users can refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What does this mean:
- AWS EKS Managed Node Groups default to `linux` as the `platform`, but `bottlerocket` is also supported by AWS (`windows` is not supported by AWS EKS Managed Node groups)
- AWS Self Managed Node Groups also default to `linux` and the default AMI used is the latest AMI for the selected Kubernetes version. If you wish to use a different OS or AMI then you will need to opt in to the necessary configurations to ensure the correct AMI is used in conjunction with the necessary user data to ensure the nodes are launched and joined to your cluster successfully.
-- AWS EKS Managed Node groups are current the preffered route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node groups provide a better user experience and offer a more "managed service" experience and therefore has precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to rollout additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When reqeusting added feature support for AWS EKS Managed Node groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
+- AWS EKS Managed Node groups are currently the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node groups provide a better user experience and offer a more "managed service" experience and therefore take precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to roll out additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting added feature support for AWS EKS Managed Node groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
- Due to the plethora of tooling and different manners of configuring your cluster, cluster configuration is intentionally left out of the module in order to simplify the module for a broader user base. Previous module versions provided support for managing the aws-auth configmap via the Kubernetes Terraform provider using the now deprecated aws-iam-authenticator; these are no longer included in the module. This module strictly focuses on the infrastructure resources to provision an EKS cluster as well as any supporting AWS resources - how the internals of the cluster are configured and managed is up to users and is outside the scope of this module. There is an output attribute, `aws_auth_configmap_yaml`, that has been provided that can be useful to help bridge this transition. Please see the various examples provided where this attribute is used to ensure that self managed node groups or external node groups have their IAM roles appropriately mapped to the aws-auth configmap. How users elect to manage the aws-auth configmap is left up to their choosing.
### User Data & Bootstrapping
@@ -186,8 +186,8 @@ There are a multitude of different possible configurations for how module users
In general (tl;dr):
- AWS EKS Managed Node Groups
- - `linux` platform (default) -> user data is pre-pended to the AWS provided bootstrap user data (bash/shell script) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to boostrap nodes to join the cluster
- - `bottlerocket` platform -> user data is merged with the AWS provided bootstrap user data (TOML file) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to boostrap nodes to join the cluster
+ - `linux` platform (default) -> user data is pre-pended to the AWS provided bootstrap user data (bash/shell script) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to bootstrap nodes to join the cluster
+ - `bottlerocket` platform -> user data is merged with the AWS provided bootstrap user data (TOML file) when using the AWS EKS provided AMI, otherwise users need to opt in via `enable_bootstrap_user_data` and use the module provided user data template or provide their own user data template to bootstrap nodes to join the cluster
- Self Managed Node Groups
- `linux` platform (default) -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template
- `bottlerocket` platform -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template
@@ -207,7 +207,7 @@ Module provided default templates can be found under the [templates directory](h
- Node Group Security Group(s)
  - Each node group (EKS Managed Node Group and Self Managed Node Group) by default creates its own security group. By default, this security group does not contain any additional security group rules. It is merely an "empty container" that offers users the ability to opt into any additional inbound or outbound rules as necessary
- - Users also have the option to supply their own, and/or additonal, externally created security group(s) to the node group as well via the `vpc_security_group_ids` variable
+ - Users also have the option to supply their own, and/or additional, externally created security group(s) to the node group as well via the `vpc_security_group_ids` variable
The security groups created by this module are depicted in the image shown below along with their default inbound/outbound rules:
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index 7d6e863772..b23c13d3f1 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -1,6 +1,6 @@
# EKS Managed Node Group Example
-Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configurating/customizing:
+Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configuring/customizing:
- A default, "out of the box" EKS managed node group as supplied by AWS EKS
- A default, "out of the box" Bottlerocket EKS managed node group as supplied by AWS EKS
diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md
index 63e752fbdb..b9c37150d5 100644
--- a/examples/self_managed_node_group/README.md
+++ b/examples/self_managed_node_group/README.md
@@ -1,6 +1,6 @@
# Self Managed Node Groups Example
-Configuration in this directory creates an AWS EKS cluster with various Self Managed Node Groups (AutoScaling Groups) demonstrating the various methods of configurating/customizing:
+Configuration in this directory creates an AWS EKS cluster with various Self Managed Node Groups (AutoScaling Groups) demonstrating the various methods of configuring/customizing:
- A default, "out of the box" self managed node group as supplied by the `self-managed-node-group` sub-module
- A Bottlerocket self managed node group that demonstrates many of the configuration/customizations offered by the `self-managed-node-group` sub-module for the Bottlerocket OS
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 991facc6a7..82d827a444 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -1,6 +1,6 @@
# Internal User Data Module
-Configuration in this directory renderes the appropriate user data for the given inputs. There are a number of different ways that user data can be utilized and this internal module is designed to aid in making that flexibility possible as well as providing a means for out of bands testing and validation.
+Configuration in this directory renders the appropriate user data for the given inputs. There are a number of different ways that user data can be utilized and this internal module is designed to aid in making that flexibility possible as well as providing a means for out-of-band testing and validation.
See the [`examples/user_data/` directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) for various examples of using the module.
@@ -25,7 +25,7 @@ When using an EKS managed node group, users have 2 primary routes for interactin
```
2. If the EKS managed node group does utilize a custom AMI, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary bootstrap configuration via user data to ensure that the node is configured to register with the cluster when launched. There are two routes that users can utilize to facilitate this bootstrapping process:
- - If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post boostrap user data as well as bootstrap additional args that are supplied to the [AWS EKS boostrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
+ - If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as bootstrap additional args that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
- Users can use the following variables to facilitate this process:
```hcl
enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
@@ -33,8 +33,8 @@ When using an EKS managed node group, users have 2 primary routes for interactin
bootstrap_extra_args = "..."
post_bootstrap_user_data = "..."
```
- - If the AMI is not an AWS EKS Optmized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
- - Users can use the following variables to faciliate this process:
+ - If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+ - Users can use the following variables to facilitate this process:
```hcl
user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
pre_bootstrap_user_data = "..."
@@ -42,14 +42,14 @@ When using an EKS managed node group, users have 2 primary routes for interactin
post_bootstrap_user_data = "..."
```
-| ℹ️ When using bottlerocket as the desired platform, since the user data for bottlerocket is TOML, all configurations are merged in the one file supplied as user data. Therefore, `pre_bootstra_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the linux platform, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI boostrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `boostrap_extra_args` variable. |
+| ℹ️ When using bottlerocket as the desired platform, since the user data for bottlerocket is TOML, all configurations are merged in the one file supplied as user data. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the linux platform, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. |
| :--- |
### Self Managed Node Group
When using a self managed node group, the options presented to users are very similar to the 2nd option listed above for EKS managed node groups. Since self managed node groups require users to provide the bootstrap user data, there is no concept of appending to user data that AWS provides; users can either elect to use the user data template provided for their platform/OS by the module or provide their own user data template for rendering by the module.
-- If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post boostrap user data as well as bootstrap additional args that are supplied to the [AWS EKS boostrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
+- If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as bootstrap additional args that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
- Users can use the following variables to facilitate this process:
```hcl
enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
@@ -57,8 +57,8 @@ When using a self managed node group, the options presented to users is very sim
bootstrap_extra_args = "..."
post_bootstrap_user_data = "..."
```
-- If the AMI is not an AWS EKS Optmized AMI derivative, or if users wish to have more control over the user data that is supplied to the node upon launch, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
- - Users can use the following variables to faciliate this process:
+- If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node upon launch, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+ - Users can use the following variables to facilitate this process:
```hcl
user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
pre_bootstrap_user_data = "..."
From eefed02a9dd7caf7b9dbf24a100e13296867288b Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 16:11:52 -0500
Subject: [PATCH 56/83] chore: add docs on various node group configurations
---
README.md | 158 ++++++++++++++++++++++++
examples/eks_managed_node_group/main.tf | 3 +-
2 files changed, 160 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index e02c4ef835..f03599ffb5 100644
--- a/README.md
+++ b/README.md
@@ -168,6 +168,164 @@ module "eks" {
}
```
+## Node Group Configuration
+
+⚠️ The configurations shown below are referenced from within the root EKS module; there will be slight differences in the default values provided when compared to the underlying sub-modules (`eks-managed-node-group`, `self-managed-node-group`, and `fargate-profile`).
+
+### EKS Managed Node Groups
+
+ℹ️ Only the pertinent attributes are shown for brevity
+
+1. By default, the `eks-managed-node-group` sub-module will use the default configurations provided by AWS EKS Managed Node Groups; EKS MNG will provide its own launch template and utilize the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
+
+```hcl
+ cluster_version = "1.21"
+
+ default = {}
+```
+
+2. AWS EKS Managed Node Group also offers native, default support for Bottlerocket OS by simply specifying the AMI type:
+
+```hcl
+ cluster_version = "1.21"
+
+ bottlerocket_default = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+ }
+```
+
+3. AWS EKS Managed Node Groups allow you to extend configurations by providing your own launch template and user data that is merged with what the service provides. For example, to provide additional user data before the nodes are bootstrapped as well as supply additional arguments to the bootstrap script:
+
+```hcl
+ cluster_version = "1.21"
+
+ extend_config = {
+ create_launch_template = true
+
+ # This is supplied to the AWS EKS Optimized AMI bootstrap script https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+
+    # This user data will be injected prior to the user data provided by the AWS EKS Managed Node Group service (contains the actual bootstrap configuration)
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+ }
+```
+
+4. The same configuration extension is offered when utilizing Bottlerocket OS AMIs, but the user data is slightly different. Bottlerocket OS uses a TOML user data file and you can provide additional configuration settings via the `bootstrap_extra_args` variable which gets merged into what is provided by the AWS EKS Managed Node Service:
+
+```hcl
+ cluster_version = "1.21"
+
+ bottlerocket_extend_config = {
+ create_launch_template = true
+
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+
+ create_launch_template = true
+
+ # this will get added to what AWS provides
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
+```
+
+5. Users can also utilize a custom AMI, but doing so means that AWS EKS Managed Node Group will NOT inject the necessary bootstrap script and configurations into the user data supplied to the launch template. When using a custom AMI, users must also opt in to bootstrapping the nodes via user data and either use the module default user data template or provide your own user data template file:
+
+```hcl
+ cluster_version = "1.21"
+
+ custom_ami = {
+ create_launch_template = true
+
+ ami_id = "ami-0caf35bc73450c396"
+
+ # By default, EKS managed node groups will not append bootstrap script;
+ # this adds it back in using the default template provided by the module
+ # Note: this assumes the AMI provided is an EKS optimized AMI derivative
+ enable_bootstrap_user_data = true
+
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+
+ # Because we have full control over the user data supplied, we can also run additional
+ # scripts/configuration changes after the bootstrap script has been run
+ post_bootstrap_user_data = <<-EOT
+ echo "you are free little kubelet!"
+ EOT
+ }
+```
+
+6. Similar support is available for Bottlerocket when using a custom AMI:
+
+```hcl
+ cluster_version = "1.21"
+
+ bottlerocket_custom_ami = {
+ ami_id = "ami-0ff61e0bcfc81dc94"
+ platform = "bottlerocket"
+
+ create_launch_template = true
+
+ # use module user data template to boostrap
+ enable_bootstrap_user_data = true
+ # this will get added to the template
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+
+ [settings.kubernetes.node-labels]
+ "label1" = "foo"
+ "label2" = "bar"
+
+ [settings.kubernetes.node-taints]
+ "dedicated" = "experimental:PreferNoSchedule"
+ "special" = "true:NoSchedule"
+ EOT
+ }
+```
+
+See the [`examples/eks_managed_node_group/ example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group) for a working example of these configurations.
+
+### Self Managed Node Groups
+
+ℹ️ Only the pertinent attributes are shown for brevity
+
+1. By default, the `self-managed-node-group` sub-module will use the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
+
+```hcl
+ cluster_version = "1.21"
+
+ # This self managed node group will use the latest AWS EKS Optimized AMI for Kubernetes 1.21
+ default = {}
+```
+
+2. To use Bottlerocket, specify the `platform` as `bottlerocket` and supply the Bottlerocket AMI. The module provided user data for Bottlerocket will be used to bootstrap the nodes created:
+
+```hcl
+ cluster_version = "1.21"
+
+ bottle_rocket = {
+ platform = "bottlerocket"
+ ami_id = data.aws_ami.bottlerocket_ami.id
+ }
+```
+
+### Fargate Profiles
+
+Fargate profiles are rather straightforward. Simply supply the necessary information for the desired profile(s). See the [`examples/fargate_profile/ example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile) for a working example of the various configurations.
+
## Module Design Considerations
### General Notes
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 66e33350b4..11740af4cb 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -124,7 +124,8 @@ module "eks" {
      # This will ensure the bootstrap user data is used to join the node
# By default, EKS managed node groups will not append bootstrap script;
- # this adds it back in if its an EKS optmized AMI derivative
+ # this adds it back in using the default template provided by the module
+ # Note: this assumes the AMI provided is an EKS optimized AMI derivative
enable_bootstrap_user_data = true
}
From f49b64fa841aaa7a1a3c0773118a6a67985f075d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Tue, 7 Dec 2021 16:35:29 -0500
Subject: [PATCH 57/83] chore: update docs on various configurations offered
---
README.md | 347 +++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 266 insertions(+), 81 deletions(-)
diff --git a/README.md b/README.md
index f03599ffb5..8e490e6fd2 100644
--- a/README.md
+++ b/README.md
@@ -179,124 +179,126 @@ module "eks" {
1. By default, the `eks-managed-node-group` sub-module will use the default configurations provided by AWS EKS Managed Node Groups; EKS MNG will provide its own launch template and utilize the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
```hcl
- cluster_version = "1.21"
-
- default = {}
+ eks_managed_node_groups = {
+ default = {}
+ }
```
2. AWS EKS Managed Node Group also offers native, default support for Bottlerocket OS by simply specifying the AMI type:
```hcl
- cluster_version = "1.21"
-
- bottlerocket_default = {
- ami_type = "BOTTLEROCKET_x86_64"
- platform = "bottlerocket"
+ eks_managed_node_groups = {
+ bottlerocket_default = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+ }
}
```
3. AWS EKS Managed Node Groups allow you to extend configurations by providing your own launch template and user data that is merged with what the service provides. For example, to provide additional user data before the nodes are bootstrapped as well as supply additional arguments to the bootstrap script:
```hcl
- cluster_version = "1.21"
-
- extend_config = {
- create_launch_template = true
+ eks_managed_node_groups = {
+ extend_config = {
+ create_launch_template = true
- # This is supplied to the AWS EKS Optimized AMI bootstrap script https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh
- bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+ # This is supplied to the AWS EKS Optimized AMI
+ # bootstrap script https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
-    # This user data will be injected prior to the user data provided by the AWS EKS Managed Node Group service (contains the actual bootstrap configuration)
- pre_bootstrap_user_data = <<-EOT
- export CONTAINER_RUNTIME="containerd"
- export USE_MAX_PODS=false
- EOT
+ # This user data will be injected prior to the user data provided by the
+      # AWS EKS Managed Node Group service (contains the actual bootstrap configuration)
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
+ }
}
```
4. The same configuration extension is offered when utilizing Bottlerocket OS AMIs, but the user data is slightly different. Bottlerocket OS uses a TOML user data file and you can provide additional configuration settings via the `bootstrap_extra_args` variable which gets merged into what is provided by the AWS EKS Managed Node Service:
```hcl
- cluster_version = "1.21"
-
- bottlerocket_extend_config = {
- create_launch_template = true
+ eks_managed_node_groups = {
+ bottlerocket_extend_config = {
+ create_launch_template = true
- ami_type = "BOTTLEROCKET_x86_64"
- platform = "bottlerocket"
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
- create_launch_template = true
+ create_launch_template = true
- # this will get added to what AWS provides
- bootstrap_extra_args = <<-EOT
- # extra args added
- [settings.kernel]
- lockdown = "integrity"
- EOT
+ # this will get added to what AWS provides
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
}
```
5. Users can also utilize a custom AMI, but doing so means that AWS EKS Managed Node Group will NOT inject the necessary bootstrap script and configurations into the user data supplied to the launch template. When using a custom AMI, users must also opt in to bootstrapping the nodes via user data and either use the module default user data template or provide your own user data template file:
```hcl
- cluster_version = "1.21"
-
- custom_ami = {
- create_launch_template = true
+ eks_managed_node_groups = {
+ custom_ami = {
+ create_launch_template = true
- ami_id = "ami-0caf35bc73450c396"
+ ami_id = "ami-0caf35bc73450c396"
- # By default, EKS managed node groups will not append bootstrap script;
- # this adds it back in using the default template provided by the module
- # Note: this assumes the AMI provided is an EKS optimized AMI derivative
- enable_bootstrap_user_data = true
+ # By default, EKS managed node groups will not append bootstrap script;
+ # this adds it back in using the default template provided by the module
+ # Note: this assumes the AMI provided is an EKS optimized AMI derivative
+ enable_bootstrap_user_data = true
- bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
+ bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
- pre_bootstrap_user_data = <<-EOT
- export CONTAINER_RUNTIME="containerd"
- export USE_MAX_PODS=false
- EOT
+ pre_bootstrap_user_data = <<-EOT
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ EOT
- # Because we have full control over the user data supplied, we can also run additional
- # scripts/configuration changes after the bootstrap script has been run
- post_bootstrap_user_data = <<-EOT
- echo "you are free little kubelet!"
- EOT
+ # Because we have full control over the user data supplied, we can also run additional
+ # scripts/configuration changes after the bootstrap script has been run
+ post_bootstrap_user_data = <<-EOT
+ echo "you are free little kubelet!"
+ EOT
+ }
}
```
6. Similar support is available for Bottlerocket when using a custom AMI:
```hcl
- cluster_version = "1.21"
-
- bottlerocket_custom_ami = {
- ami_id = "ami-0ff61e0bcfc81dc94"
- platform = "bottlerocket"
-
- create_launch_template = true
-
- # use module user data template to boostrap
- enable_bootstrap_user_data = true
- # this will get added to the template
- bootstrap_extra_args = <<-EOT
- # extra args added
- [settings.kernel]
- lockdown = "integrity"
-
- [settings.kubernetes.node-labels]
- "label1" = "foo"
- "label2" = "bar"
-
- [settings.kubernetes.node-taints]
- "dedicated" = "experimental:PreferNoSchedule"
- "special" = "true:NoSchedule"
- EOT
+ eks_managed_node_groups = {
+ bottlerocket_custom_ami = {
+ ami_id = "ami-0ff61e0bcfc81dc94"
+ platform = "bottlerocket"
+
+ create_launch_template = true
+
+ # use module user data template to bootstrap
+ enable_bootstrap_user_data = true
+ # this will get added to the template
+ bootstrap_extra_args = <<-EOT
+ # extra args added
+ [settings.kernel]
+ lockdown = "integrity"
+
+ [settings.kubernetes.node-labels]
+ "label1" = "foo"
+ "label2" = "bar"
+
+ [settings.kubernetes.node-taints]
+ "dedicated" = "experimental:PreferNoSchedule"
+ "special" = "true:NoSchedule"
+ EOT
+ }
}
```
-See the [`examples/eks_managed_node_group/ example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group) for a working example of these configurations.
+See the [`examples/eks_managed_node_group/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group) for a working example of these configurations.
### Self Managed Node Groups
@@ -308,7 +310,9 @@ See the [`examples/eks_managed_node_group/ example](https://github.com/terraform
cluster_version = "1.21"
# This self managed node group will use the latest AWS EKS Optimized AMI for Kubernetes 1.21
- default = {}
+ self_managed_node_groups = {
+ default = {}
+ }
```
2. To use Bottlerocket, specify the `platform` as `bottlerocket` and supply the Bottlerocket AMI. The module provided user data for Bottlerocket will be used to bootstrap the nodes created:
@@ -316,15 +320,196 @@ See the [`examples/eks_managed_node_group/ example](https://github.com/terraform
```hcl
cluster_version = "1.21"
- bottle_rocket = {
- platform = "bottlerocket"
- ami_id = data.aws_ami.bottlerocket_ami.id
+ self_managed_node_groups = {
+ bottlerocket = {
+ platform = "bottlerocket"
+ ami_id = data.aws_ami.bottlerocket_ami.id
+ }
}
```
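+
+The `data.aws_ami.bottlerocket_ami` reference above is not defined in the snippet; a minimal lookup, assuming the standard Bottlerocket AMI naming scheme for the matching Kubernetes version, could look like the following:
+
+```hcl
+# Latest Bottlerocket AMI for Kubernetes 1.21 on x86_64; the name filter is an
+# assumption based on the published Bottlerocket AMI naming scheme
+data "aws_ami" "bottlerocket_ami" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["bottlerocket-aws-k8s-1.21-x86_64-*"]
+  }
+}
+```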
### Fargate Profiles
-Fargate profiles are rather straightforward. Simply supply the necessary information for the desired profile(s). See the [`examples/fargate_profile/ example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile) for a working example of the various configurations.
+Fargate profiles are rather straightforward. Simply supply the necessary information for the desired profile(s). See the [`examples/fargate_profile/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile) for a working example of the various configurations.
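+
+For example, a minimal profile that schedules pods in the `default` namespace onto Fargate might look like the following (the profile name and selector are illustrative):
+
+```hcl
+  fargate_profiles = {
+    default = {
+      name = "default"
+      selectors = [
+        { namespace = "default" }
+      ]
+    }
+  }
+```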
+
+### Mixed Node Groups
+
+ℹ️ Only the pertinent attributes are shown for brevity
+
+Users are free to mix and match the different node group types to meet their needs. For example, the following are just a few of the possible combinations:
+- AWS EKS Cluster with one or more AWS EKS Managed Node Groups
+- AWS EKS Cluster with one or more Self Managed Node Groups
+- AWS EKS Cluster with one or more Fargate profiles
+- AWS EKS Cluster with one or more AWS EKS Managed Node Groups, one or more Self Managed Node Groups, one or more Fargate profiles
+
+It is also possible to configure the various node groups of each family differently, as well as to attach externally defined node groups (created outside of the root `eks` module definition using the provided sub-modules) to the cluster - there are no restrictions on the combinations the module supports; a standalone sub-module sketch is shown after the example below.
+
+```hcl
+ self_managed_node_group_defaults = {
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+ }
+
+ self_managed_node_groups = {
+ one = {
+ name = "spot-1"
+
+ public_ip = true
+ max_size = 5
+ desired_size = 2
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "1"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "2"
+ },
+ ]
+ }
+
+ pre_bootstrap_user_data = <<-EOT
+ echo "foo"
+ export FOO=bar
+ EOT
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ post_bootstrap_user_data = <<-EOT
+ cd /tmp
+ sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+ sudo systemctl enable amazon-ssm-agent
+ sudo systemctl start amazon-ssm-agent
+ EOT
+ }
+ }
+
+ # EKS Managed Node Group(s)
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ create_launch_template = true
+ }
+
+ eks_managed_node_groups = {
+ blue = {}
+ green = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = {
+ dedicated = {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ }
+
+ update_config = {
+ max_unavailable_percentage = 50 # or set `max_unavailable`
+ }
+
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ # Fargate Profile(s)
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
+```
+
+See the [`examples/complete/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) for a working example of these configurations.
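+
+For the externally defined case noted above, a rough sketch of consuming the `self-managed-node-group` sub-module standalone might look like the following (input names should be confirmed against the sub-module's README; the `module.eks` and `module.vpc` references are assumptions):
+
+```hcl
+module "standalone_node_group" {
+  source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"
+
+  name         = "standalone"
+  cluster_name = module.eks.cluster_id
+
+  # Used by the module-provided user data to join nodes to the cluster
+  cluster_endpoint    = module.eks.cluster_endpoint
+  cluster_auth_base64 = module.eks.cluster_certificate_authority_data
+
+  instance_type = "m5.large"
+  min_size      = 1
+  max_size      = 3
+  desired_size  = 1
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+}
+```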
+
+### Default configurations
+
+Each node group type (EKS managed node group, self managed node group, Fargate profile) provides a default configuration setting that lets users supply their own default configurations in place of the module's defaults. This allows users to set a common set of defaults for their node groups while retaining the ability to override them within a specific node group definition. The order of precedence for each node group type is roughly as follows (from highest to lowest precedence):
+1. Node group individual configuration
+2. Node group family default configuration
+3. Module default configuration
+
+These are provided via the following variables for the respective node group family:
+- `eks_managed_node_group_defaults`
+- `self_managed_node_group_defaults`
+- `fargate_profile_defaults`
+
+For example, the following creates 4 AWS EKS Managed Node Groups:
+
+```hcl
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+ create_launch_template = true
+ }
+
+ eks_managed_node_groups = {
+ # Uses defaults provided by module with the default settings above overriding the module defaults
+ default = {}
+
+ # This further overrides the instance types used
+ compute = {
+ instance_types = ["c5.large", "c6i.large", "c6d.large"]
+ }
+
+ # This further overrides the instance types and disk size used
+ persistent = {
+ disk_size = 1024
+ instance_types = ["r5.xlarge", "r6i.xlarge", "r5b.xlarge"]
+ }
+
+ # This overrides the OS used
+ bottlerocket = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ platform = "bottlerocket"
+ }
+ }
+```
## Module Design Considerations
From ee3c44bfc0a917d01d1fb3a6ac3bda0789544b02 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 8 Dec 2021 09:51:39 -0500
Subject: [PATCH 58/83] chore: update examples after final validation
---
examples/complete/main.tf | 32 ++++----------
examples/irsa_autoscale_refresh/main.tf | 53 +++++++++++++++++++++++-
examples/self_managed_node_group/main.tf | 1 -
node_groups.tf | 2 +-
4 files changed, 61 insertions(+), 27 deletions(-)
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index a46c57c5b8..cb68d22531 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -53,31 +53,15 @@ module "eks" {
}
self_managed_node_groups = {
- one = {
- name = "spot-1"
-
- public_ip = true
- max_size = 5
- desired_size = 2
-
- use_mixed_instances_policy = true
- mixed_instances_policy = {
- instances_distribution = {
- on_demand_base_capacity = 0
- on_demand_percentage_above_base_capacity = 10
- spot_allocation_strategy = "capacity-optimized"
+ spot = {
+ create_launch_template = true
+ launch_template_name = "spot"
+
+ instance_market_options = {
+ market_type = "spot"
+ spot_options = {
+ block_duration_minutes = 60
}
-
- override = [
- {
- instance_type = "m5.large"
- weighted_capacity = "1"
- },
- {
- instance_type = "m6i.large"
- weighted_capacity = "2"
- },
- ]
}
pre_bootstrap_user_data = <<-EOT
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
index 4f9c747dc1..f344fc1295 100644
--- a/examples/irsa_autoscale_refresh/main.tf
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -60,7 +60,58 @@ module "eks" {
propogate_tags = [{
key = "aws-node-termination-handler/managed"
- value = ""
+ value = true
+ propagate_at_launch = true
+ }]
+ }
+
+ mixed_instance = {
+ create_launch_template = true
+ launch_template_name = "mixed-instance"
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ on_demand_base_capacity = 0
+ on_demand_percentage_above_base_capacity = 10
+ spot_allocation_strategy = "capacity-optimized"
+ }
+
+ override = [
+ {
+ instance_type = "m5.large"
+ weighted_capacity = "1"
+ },
+ {
+ instance_type = "m6i.large"
+ weighted_capacity = "2"
+ },
+ ]
+ }
+
+ propogate_tags = [{
+ key = "aws-node-termination-handler/managed"
+ value = true
+ propagate_at_launch = true
+ }]
+ }
+
+ spot = {
+ create_launch_template = true
+ launch_template_name = "spot"
+
+ instance_market_options = {
+ market_type = "spot"
+ spot_options = {
+ block_duration_minutes = 60
+ }
+ }
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ propogate_tags = [{
+ key = "aws-node-termination-handler/managed"
+ value = true
propagate_at_launch = true
}]
}
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 7646d24411..51e23de725 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -125,7 +125,6 @@ module "eks" {
launch_template_name = "self-managed-ex"
launch_template_use_name_prefix = true
description = "Self managed node group example launch template"
- launch_template_default_version = true
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
diff --git a/node_groups.tf b/node_groups.tf
index 12a716c3c1..bcfbb12aa1 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -286,7 +286,7 @@ module "self_managed_node_group" {
# Autoscaling Group
name = try(each.value.name, each.key)
- use_name_prefix = try(each.value.use_name_prefix, var.self_managed_node_group_defaults.use_name_prefix, false)
+ use_name_prefix = try(each.value.use_name_prefix, var.self_managed_node_group_defaults.use_name_prefix, true)
launch_template_name = try(each.value.launch_template_name, each.key)
launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null)
From 4cafd01b07f0e86e1f8b8da76bef333d5094ed83 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 8 Dec 2021 15:53:51 -0500
Subject: [PATCH 59/83] chore: update upgrade document
---
README.md | 5 +-
UPGRADE-18.0.md | 533 +++++++++++++++++--
examples/complete/main.tf | 4 +-
examples/irsa_autoscale_refresh/charts.tf | 6 +-
examples/irsa_autoscale_refresh/main.tf | 4 +-
examples/self_managed_node_group/main.tf | 21 +-
main.tf | 19 +-
modules/eks-managed-node-group/README.md | 8 +-
modules/eks-managed-node-group/variables.tf | 8 +-
modules/self-managed-node-group/README.md | 2 +-
modules/self-managed-node-group/main.tf | 2 +-
modules/self-managed-node-group/variables.tf | 2 +-
node_groups.tf | 2 +-
variables.tf | 4 +-
14 files changed, 524 insertions(+), 96 deletions(-)
diff --git a/README.md b/README.md
index 8e490e6fd2..3748f47674 100644
--- a/README.md
+++ b/README.md
@@ -706,7 +706,6 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_iam_policy_document.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
@@ -718,7 +717,6 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
| [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no |
-| [cluster\_additional\_security\_group\_rules](#input\_cluster\_additional\_security\_group\_rules) | List of additional security group rules to add to the cluster security group created | `map(any)` | `{}` | no |
| [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no |
| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `["audit", "api", "authenticator"]` | no |
| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | | `[]` | no |
@@ -727,6 +725,7 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | `["0.0.0.0/0"]` | no |
| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created | `map(any)` | `{}` | no |
| [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false` | `string` | `""` | no |
| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
@@ -754,7 +753,7 @@ Full contributing [guidelines are covered here](https://github.com/terraform-aws
| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
-| [node\_additional\_security\_group\_rules](#input\_node\_additional\_security\_group\_rules) | List of additional security group rules to add to the node security group created | `map(any)` | `{}` | no |
+| [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created | `map(any)` | `{}` | no |
| [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no |
| [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no |
| [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no |
diff --git a/UPGRADE-18.0.md b/UPGRADE-18.0.md
index 96a0bc5907..52a850c7c6 100644
--- a/UPGRADE-18.0.md
+++ b/UPGRADE-18.0.md
@@ -1,63 +1,417 @@
# Upgrade from v17.x to v18.x
-If you have any questions regarding this upgrade process, please consult the `examples` directory:
-
-- TODO
-
-If you find a bug, please open an issue with supporting configuration to reproduce.
-
-## Changes
-
-- Launch configuration support has been removed and only launch template is supported going forward. AWS is no longer adding new features back into launch configuration and their docs state [`We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. We provide information about launch configurations for customers who have not yet migrated from launch configurations to launch templates.`](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
-- One IAM role/profile is created as a "default" role (if users opt in to create the role/profile). Otherwise users need to supply the instance profile name/arn to use for the various groups
-- Maps, maps, maps, maps...
+Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce.
## List of backwards incompatible changes
-- TODO
+- Launch configuration support has been removed and only launch template is supported going forward. AWS is no longer adding new features back into launch configuration and their docs state [`We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. We provide information about launch configurations for customers who have not yet migrated from launch configurations to launch templates.`](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
+- Support for managing aws-auth configmap has been removed. This change also removes the dependency on the Kubernetes Terraform provider, the local dependency on aws-iam-authenticator for users, as well as the reliance on the forked http provider to wait and poll on cluster creation. To aid users in this change, an output variable `aws_auth_configmap_yaml` has been provided which renders the aws-auth configmap necessary to support at least the IAM roles used by the module (additional mapRoles/mapUsers definitions to be provided by users)
+- Support for managing kubeconfig and its associated `local_file` resources has been removed; users are able to use the awscli provided `aws eks update-kubeconfig --name <cluster_name>` to update their local kubeconfig as necessary
+- The terminology used in the module has been modified to reflect that used by the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html).
+ - [AWS EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), `eks_managed_node_groups`, was previously referred to as simply node group, `node_groups`
+  - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html), `self_managed_node_groups`, was previously referred to as worker group, `worker_groups`
+ - [AWS Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html), `fargate_profiles`, remains unchanged in terms of naming and terminology
+- The three different node group types supported by AWS and the module have been refactored into standalone sub-modules that are both used by the root `eks` module as well as available for indiviudal, standalone consumption if desired.
+ - The previous `node_groups` sub-module is now named `eks-managed-node-group` and provisions a single AWS EKS Managed Node Group per sub-module definition (previous version utilized `for_each` to create 0 or more node groups)
+ - Additional changes for the `eks-managed-node-group` sub-module over the previous `node_groups` module include:
+ - Variable name changes defined in section `Variable and output changes` below
+ - Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
+ - Support for nearly full control of the security group created, or provide the ID of an existing security group, has been added
+ - User data has been revamped and all user data logic moved to the `_user_data` internal sub-module; the local `userdata.sh.tpl` has been removed entirely
+ - The previous `fargate` sub-module is now named `fargate-profile` and provisions a single AWS EKS Fargate Profile per sub-module definition (previous version utilized `for_eadch` to create 0 or more profiles)
+ - Additional changes for the `fargate-profile` sub-module over the previous `fargate` module include:
+ - Variable name changes defined in section `Variable and output changes` below
+ - Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
+ - Similar to the `eks_managed_node_group_defaults` and `self_managed_node_group_defaults`, a `fargate_profile_defaults` has been provided to allow users to control the default configurations for the Fargate profiles created
+ - A sub-module for `self-managed-node-group` has been created and provisions a single self managed node group (autoscaling group) per sub-module definition
+ - Additional changes for the `self-managed-node-group` sub-module over the previous `node_groups` variable include:
+ - The underlying autoscaling group and launch template have been updated to more closely match that of the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module and the features it offers
+ - The previous iteration used a count over a list of node group definitons which was prone to disruptive updates; this is now replaced with a map/for_each to align with that of the EKS managed node group and Fargate profile behaviors/style
+- The user data configuration supported across the module has been completely revamped. A new `_user_data` internal sub-module has been created to consolidate all user data configuration in one location which provides better support for testability (via the [`examples/user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) example). The new sub-module supports nearly all possible combinations including the ability to allow users to provide their own user data template which will be rendered by the module. See the `examples/user_data` example project for the full range of example configuration possibilities; more details on the logic of the design can be found in the [`modules/_user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data) directory.
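+
+With aws-auth and kubeconfig management removed, a typical replacement workflow for the two items above might look like the following (the cluster name and region placeholders are illustrative):
+
+```bash
+# Render the aws-auth configmap from the module output and apply it to the cluster
+terraform output -raw aws_auth_configmap_yaml | kubectl apply -f -
+
+# Refresh the local kubeconfig via the awscli
+aws eks update-kubeconfig --name <cluster_name> --region <region>
+```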
+
+## Additional changes
+
+### Added
+
+- Support for AWS EKS Addons has been added
+- Support for AWS EKS Cluster Identity Provider Configuration has been added
+- AWS Terraform provider minimum required version has been updated to 3.64 to support the changes made and additional resoruces supported
+- An example `user_data` project has been added to aid in demonstrating, testing, and validating the various methods of configuring user data with the `_user_data` sub-module as well as the root `eks` module
+- Template for rendering the aws-auth configmap output - `aws_auth_cm.tpl`
+- Template for Bottlerocket OS user data bootstrapping - `bottlerocket_user_data.tpl`
+
+### Modified
+
+- The previous `fargate` example has been renamed to `fargate_profile`
+- The previous `irsa` and `instance_refresh` examples have been merged into one example `irsa_autoscale_refresh`
+- The previous `managed_node_groups` example has been renamed to `self_managed_node_group`
+- The previously hardcoded EKS OIDC root CA thumbprint value and variable have been replaced with a `tls_certificate` data source that refers to the cluster OIDC issuer url; thumbprint values should remain unchanged, however
+- Individual cluster security group resources have been replaced with a single security group resource that takes a map of rules as input. The default ingress/egress rules have had their scope reduced in order to provide the bare minimum of access to permit successful cluster creation and allow users to opt in to any additional network access as needed for a better security posture. This means the `0.0.0.0/0` egress rule has been removed and replaced with TCP/443 and TCP/10250 egress rules to the node group security group
+- The Linux/bash user data template has been updated to include the bareminimum necessary for bootstrapping AWS EKS Optimized AMI derivative nodes with provisions for providing additional user data and configurations; was named `userdata.sh.tpl` and is now named `linux_user_data.tpl`
+- The Windows user data template has been renamed from `userdata_windows.tpl` to `windows_user_data.tpl`
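+
+For users who need broader access than the reduced defaults described above, the new rules maps accept standard security group rule attributes. A hypothetical rule re-opening all egress on the node security group (the rule key and values are illustrative):
+
+```hcl
+  node_security_group_additional_rules = {
+    egress_all = {
+      description = "Allow all egress"
+      type        = "egress"
+      protocol    = "-1"
+      from_port   = 0
+      to_port     = 0
+      cidr_blocks = ["0.0.0.0/0"]
+    }
+  }
+```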
+
+### Removed
+
+- Miscellaneous documents on how to configure Kubernetes cluster internals have been removed. Docuemntation related to how to configure the AWS EKS Cluster and its supported infrastructure resources provided by the module are supported, while cluster internal configuration is out of scope for this project
+- The previous `bottlerocket` example has been removed in favor of demonstrating the use and configuration of Bottlerocket nodes via the respective `eks_managed_node_group` and `self_managed_node_group` examples
+- The previous `launch_template` and `launch_templates_with_managed_node_groups` examples have been removed; only launch templates are now supported (default) and launch configuration support has been removed
+- The previous `secrets_encryption` example has been removed; the functionality has been demonstrated in several of the new examples rendering this standlone example redundant
+- The additional, custom IAM role policy for the cluster role has been removed. The permissions are either now provided by the attached AWS managed policies or are no longer required
+- The `kubeconfig.tpl` template; kubeconfig management is no longer supported under this module
+- The HTTP Terraform provider (forked copy) dependency has been removed
### Variable and output changes
1. Removed variables:
- - `var.cluster_create_timeout`, `var.cluster_update_timeout`, and `var.cluster_delete_timeout` have been replaced with `var.cluster_timeouts`
+ - `cluster_create_timeout`, `cluster_update_timeout`, and `cluster_delete_timeout` have been replaced with `cluster_timeouts`
+ - `kubeconfig_name`
+ - `kubeconfig_output_path`
+ - `kubeconfig_file_permission`
+ - `kubeconfig_api_version`
+ - `kubeconfig_aws_authenticator_command`
+ - `kubeconfig_aws_authenticator_command_args`
+ - `kubeconfig_aws_authenticator_additional_args`
+ - `kubeconfig_aws_authenticator_env_variables`
+ - `write_kubeconfig`
+ - `default_platform`
+ - `manage_aws_auth`
+ - `aws_auth_additional_labels`
+ - `map_accounts`
+ - `map_roles`
+ - `map_users`
+ - `fargate_subnets`
+ - `worker_groups_launch_template`
+ - `worker_security_group_id`
+ - `worker_ami_name_filter`
+ - `worker_ami_name_filter_windows`
+ - `worker_ami_owner_id`
+ - `worker_ami_owner_id_windows`
+ - `worker_additional_security_group_ids`
+ - `worker_sg_ingress_from_port`
+ - `workers_additional_policies`
+ - `worker_create_security_group`
+ - `worker_create_initial_lifecycle_hooks`
+ - `worker_create_cluster_primary_security_group_rules`
+ - `cluster_create_endpoint_private_access_sg_rule`
+ - `cluster_endpoint_private_access_cidrs`
+ - `cluster_endpoint_private_access_sg`
+ - `manage_worker_iam_resources`
+ - `workers_role_name`
+ - `attach_worker_cni_policy`
+ - `eks_oidc_root_ca_thumbprint`
+ - `create_fargate_pod_execution_role`
+ - `fargate_pod_execution_role_name`
+ - `cluster_egress_cidrs`
+ - `workers_egress_cidrs`
+ - `wait_for_cluster_timeout`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `default_iam_role_arn`
+ - `workers_group_defaults`
+ - `worker_security_group_id`
+ - `node_groups_defaults`
+ - `node_groups`
+ - `ebs_optimized_not_supported`
+ - Fargate profile sub-module (was `fargate`)
+ - `create_eks` and `create_fargate_pod_execution_role` have been replaced with simply `create`
2. Renamed variables:
- - `create_eks` -> `create`
- - `subnets` -> `subnet_ids`
- - `cluster_create_security_group` -> `create_cluster_security_group`
+ - `create_eks` -> `create`
+ - `subnets` -> `subnet_ids`
+ - `cluster_create_security_group` -> `create_cluster_security_group`
+ - `cluster_log_retention_in_days` -> `cloudwatch_log_group_retention_in_days`
+ - `cluster_log_kms_key_id` -> `cloudwatch_log_group_kms_key_id`
+ - `manage_cluster_iam_resources` -> `create_iam_role`
+ - `cluster_iam_role_name` -> `iam_role_name`
+ - `permissions_boundary` -> `iam_role_permissions_boundary`
+ - `iam_path` -> `iam_role_path`
+ - `pre_userdata` -> `pre_bootstrap_user_data`
+ - `additional_userdata` -> `post_bootstrap_user_data`
+ - `worker_groups` -> `self_managed_node_groups`
+ - `workers_group_defaults` -> `self_managed_node_group_defaults`
+ - `node_groups` -> `eks_managed_node_groups`
+ - `node_groups_defaults` -> `eks_managed_node_group_defaults`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `create_eks` -> `create`
+ - `worker_additional_security_group_ids` -> `vpc_security_group_ids`
+ - Fargate profile sub-module
+ - `fargate_pod_execution_role_name` -> `name`
+ - `create_fargate_pod_execution_role` -> `create_iam_role`
+ - `subnets` -> `subnet_ids`
+ - `iam_path` -> `iam_role_path`
+ - `permissions_boundary` -> `iam_role_permissions_boundary`
3. Added variables:
- - TODO
+ - `cluster_additional_security_group_ids` added to allow users to add additional security groups to the cluster as needed
+ - `cluster_security_group_name`
+ - `cluster_security_group_use_name_prefix` added to allow users to use either the name as specified or default to using the name specified as a prefix
+ - `cluster_security_group_description`
+ - `cluster_security_group_additional_rules`
+ - `cluster_security_group_tags`
+ - `create_cloudwatch_log_group` added in place of the logic that checked if any cluster log types were enabled to allow users to opt in as they see fit
+  - `create_node_security_group` added to create a single security group that connects node groups and the cluster in one central location
+ - `node_security_group_id`
+ - `node_security_group_name`
+ - `node_security_group_use_name_prefix`
+ - `node_security_group_description`
+ - `node_security_group_additional_rules`
+ - `node_security_group_tags`
+ - `iam_role_arn`
+ - `iam_role_use_name_prefix`
+ - `iam_role_description`
+ - `iam_role_additional_policies`
+ - `iam_role_tags`
+ - `cluster_addons`
+ - `cluster_identity_providers`
+ - `fargate_profile_defaults`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `platform`
+ - `enable_bootstrap_user_data`
+ - `pre_bootstrap_user_data`
+ - `post_bootstrap_user_data`
+ - `bootstrap_extra_args`
+ - `user_data_template_path`
+ - `create_launch_template`
+ - `launch_template_name`
+ - `launch_template_use_name_prefix`
+ - `description`
+ - `ebs_optimized`
+ - `ami_id`
+ - `key_name`
+ - `launch_template_default_version`
+ - `update_launch_template_default_version`
+ - `disable_api_termination`
+ - `kernel_id`
+ - `ram_disk_id`
+ - `block_device_mappings`
+ - `capacity_reservation_specification`
+ - `cpu_options`
+ - `credit_specification`
+ - `elastic_gpu_specifications`
+ - `elastic_inference_accelerator`
+ - `enclave_options`
+ - `instance_market_options`
+ - `license_specifications`
+ - `metadata_options`
+ - `enable_monitoring`
+ - `network_interfaces`
+ - `placement`
+ - `min_size`
+ - `max_size`
+ - `desired_size`
+ - `use_name_prefix`
+ - `ami_type`
+ - `ami_release_version`
+ - `capacity_type`
+ - `disk_size`
+ - `force_update_version`
+ - `instance_types`
+ - `labels`
+ - `cluster_version`
+ - `launch_template_version`
+ - `remote_access`
+ - `taints`
+ - `update_config`
+ - `timeouts`
+ - `create_security_group`
+ - `security_group_name`
+ - `security_group_use_name_prefix`
+ - `security_group_description`
+ - `vpc_id`
+ - `security_group_rules`
+ - `cluster_security_group_id`
+ - `security_group_tags`
+ - `create_iam_role`
+ - `iam_role_arn`
+ - `iam_role_name`
+ - `iam_role_use_name_prefix`
+ - `iam_role_path`
+ - `iam_role_description`
+ - `iam_role_permissions_boundary`
+ - `iam_role_additional_policies`
+ - `iam_role_tags`
+ - Fargate profile sub-module (was `fargate`)
+ - `iam_role_arn` (for if `create_iam_role` is `false` to bring your own externally created role)
+ - `iam_role_name`
+ - `iam_role_use_name_prefix`
+ - `iam_role_description`
+ - `iam_role_additional_policies`
+ - `iam_role_tags`
+ - `selectors`
+ - `timeouts`
4. Removed outputs:
- - TODO
+ - `cluster_version`
+ - `kubeconfig`
+ - `kubeconfig_filename`
+ - `workers_asg_arns`
+ - `workers_asg_names`
+ - `workers_user_data`
+ - `workers_default_ami_id`
+ - `workers_default_ami_id_windows`
+ - `workers_launch_template_ids`
+ - `workers_launch_template_arns`
+ - `workers_launch_template_latest_versions`
+ - `worker_security_group_id`
+ - `worker_iam_instance_profile_arns`
+ - `worker_iam_instance_profile_names`
+ - `worker_iam_role_name`
+ - `worker_iam_role_arn`
+ - `fargate_profile_ids`
+ - `fargate_profile_arns`
+ - `fargate_iam_role_name`
+ - `fargate_iam_role_arn`
+ - `node_groups`
+ - `security_group_rule_cluster_https_worker_ingress`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `node_groups`
+ - `aws_auth_roles`
+ - Fargate profile sub-module (was `fargate`)
+ - `aws_auth_roles`
5. Renamed outputs:
- - TODO
+ - `config_map_aws_auth` -> `aws_auth_configmap_yaml`
+ - Fargate profile sub-module (was `fargate`)
+ - `fargate_profile_ids` -> `fargate_profile_id`
+ - `fargate_profile_arns` -> `fargate_profile_arn`
6. Added outputs:
- - TODO
+ - `cluster_platform_version`
+ - `cluster_status`
+ - `cluster_security_group_arn`
+ - `cluster_security_group_id`
+ - `node_security_group_arn`
+ - `node_security_group_id`
+ - `cluster_iam_role_unique_id`
+ - `cluster_addons`
+ - `cluster_identity_providers`
+ - `fargate_profiles`
+ - `eks_managed_node_groups`
+ - `self_managed_node_groups`
+ - EKS Managed Node Group sub-module (was `node_groups`)
+ - `launch_template_id`
+ - `launch_template_arn`
+ - `launch_template_latest_version`
+ - `node_group_arn`
+ - `node_group_id`
+ - `node_group_resources`
+ - `node_group_status`
+ - `security_group_arn`
+ - `security_group_id`
+ - `iam_role_name`
+ - `iam_role_arn`
+ - `iam_role_unique_id`
+ - Fargate profile sub-module (was `fargate`)
+ - `iam_role_unique_id`
+ - `fargate_profile_status`
## Upgrade Migrations
### Before 17.x Example
```hcl
-module "cluster_before" {
+module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 17.0"
- # TODO
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnets = module.vpc.private_subnets
+
+ # Managed Node Groups
+ node_groups_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
+ }
+
+ node_groups = {
+ node_group = {
+ min_capacity = 1
+ max_capacity = 10
+ desired_capacity = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+
+ update_config = {
+ max_unavailable_percentage = 50
+ }
+
+ k8s_labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = [
+ {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ ]
+
+ additional_tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
+
+ # Worker groups
+ worker_additional_security_group_ids = [aws_security_group.additional.id]
+
+ worker_groups_launch_template = [
+ {
+ name = "worker-group"
+ override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
+ spot_instance_pools = 4
+ asg_max_size = 5
+ asg_desired_capacity = 2
+ kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
+ public_ip = true
+ },
+ ]
+
+ # Fargate
+ fargate_profiles = {
+ default = {
+ name = "default"
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
tags = {
- Environment = "dev"
- Terraform = "true"
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
}
}
```
@@ -69,23 +423,128 @@ module "cluster_after" {
source = "terraform-aws-modules/eks/aws"
version = "~> 18.0"
- # TODO
+ cluster_name = local.name
+ cluster_version = local.cluster_version
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = true
- tags = {
- Environment = "dev"
- Terraform = "true"
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_group_defaults = {
+ ami_type = "AL2_x86_64"
+ disk_size = 50
}
-}
-```
-### State Changes
+ eks_managed_node_groups = {
+ node_group = {
+ min_size = 1
+ max_size = 10
+ desired_size = 1
+
+ instance_types = ["t3.large"]
+ capacity_type = "SPOT"
+
+ update_config = {
+ max_unavailable_percentage = 50
+ }
+
+ labels = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+
+ taints = [
+ {
+ key = "dedicated"
+ value = "gpuGroup"
+ effect = "NO_SCHEDULE"
+ }
+ ]
+
+ tags = {
+ ExtraTag = "example"
+ }
+ }
+ }
-To migrate from the `v17.x` version to `v18.x` version example shown above, the following state move commands can be performed to maintain the current resources without modification:
+ self_managed_node_group_defaults = {
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ }
-```bash
-terraform state mv 'from' 'to'
-```
+ self_managed_node_groups = {
+ worker_group = {
+ name = "worker-group"
+
+ min_size = 1
+ max_size = 5
+ desired_size = 2
+ instance_type = "m4.large"
+
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+ block_device_mappings = {
+ xvda = {
+ device_name = "/dev/xvda"
+ ebs = {
+ delete_on_termination = true
+ encrypted = false
+ volume_size = 100
+ volume_type = "gp2"
+ }
+
+ }
+ }
+
+ use_mixed_instances_policy = true
+ mixed_instances_policy = {
+ instances_distribution = {
+ spot_instance_pools = 4
+ }
+
+ override = [
+ { instance_type = "m5.large" },
+ { instance_type = "m5a.large" },
+ { instance_type = "m5d.large" },
+ { instance_type = "m5ad.large" },
+ ]
+ }
+ }
+ }
-### Configuration Changes
+ # Fargate
+ fargate_profiles = {
+ default = {
+ name = "default"
+
+ selectors = [
+ {
+ namespace = "kube-system"
+ labels = {
+ k8s-app = "kube-dns"
+ }
+ },
+ {
+ namespace = "default"
+ }
+ ]
+
+ tags = {
+ Owner = "test"
+ }
+
+ timeouts = {
+ create = "20m"
+ delete = "20m"
+ }
+ }
+ }
-TODO
+ tags = {
+ Environment = "test"
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+```
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index cb68d22531..98691b2e5d 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -57,11 +57,9 @@ module "eks" {
create_launch_template = true
launch_template_name = "spot"
+ instance_type = "m5.large"
instance_market_options = {
market_type = "spot"
- spot_options = {
- block_duration_minutes = 60
- }
}
pre_bootstrap_user_data = <<-EOT
diff --git a/examples/irsa_autoscale_refresh/charts.tf b/examples/irsa_autoscale_refresh/charts.tf
index 6a98c1a9bf..1ef1195b9e 100644
--- a/examples/irsa_autoscale_refresh/charts.tf
+++ b/examples/irsa_autoscale_refresh/charts.tf
@@ -52,7 +52,8 @@ resource "helm_release" "cluster_autoscaler" {
}
depends_on = [
- module.eks.cluster_id
+ module.eks.cluster_id,
+ null_resource.apply,
]
}
@@ -166,7 +167,8 @@ resource "helm_release" "aws_node_termination_handler" {
}
depends_on = [
- module.eks.cluster_id
+ module.eks.cluster_id,
+ null_resource.apply,
]
}
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
index f344fc1295..822186e94b 100644
--- a/examples/irsa_autoscale_refresh/main.tf
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -100,11 +100,9 @@ module "eks" {
create_launch_template = true
launch_template_name = "spot"
+ instance_type = "m5.large"
instance_market_options = {
market_type = "spot"
- spot_options = {
- block_duration_minutes = 60
- }
}
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 51e23de725..9ce16c674d 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -108,28 +108,17 @@ module "eks" {
echo "you are free little kubelet!"
EOT
- capacity_type = "SPOT"
- disk_size = 256
- force_update_version = true
- instance_type = "m6i.large"
- labels = {
- GithubRepo = "terraform-aws-eks"
- GithubOrg = "terraform-aws-modules"
- }
-
- update_config = {
- max_unavailable_percentage = 50 # or set `max_unavailable`
- }
+ disk_size = 256
+ instance_type = "m6i.large"
create_launch_template = true
launch_template_name = "self-managed-ex"
launch_template_use_name_prefix = true
description = "Self managed node group example launch template"
- ebs_optimized = true
- vpc_security_group_ids = [aws_security_group.additional.id]
- disable_api_termination = false
- enable_monitoring = true
+ ebs_optimized = true
+ vpc_security_group_ids = [aws_security_group.additional.id]
+ enable_monitoring = true
block_device_mappings = {
xvda = {
diff --git a/main.tf b/main.tf
index 7ea9c5a479..286e23b5a4 100644
--- a/main.tf
+++ b/main.tf
@@ -119,7 +119,7 @@ resource "aws_security_group" "cluster" {
}
resource "aws_security_group_rule" "cluster" {
- for_each = local.create_cluster_sg ? merge(local.cluster_security_group_rules, var.cluster_additional_security_group_rules) : {}
+ for_each = local.create_cluster_sg ? merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : {}
# Required
security_group_id = aws_security_group.cluster[0].id
@@ -199,26 +199,9 @@ resource "aws_iam_role" "this" {
permissions_boundary = var.iam_role_permissions_boundary
force_detach_policies = true
- inline_policy {
- name = local.iam_role_name
- policy = data.aws_iam_policy_document.additional[0].json
- }
-
tags = merge(var.tags, var.iam_role_tags)
}
-data "aws_iam_policy_document" "additional" {
- count = var.create && var.create_iam_role ? 1 : 0
-
- # Deny permissions to logs:CreateLogGroup since its created through Terraform
- # in this module, and it causes issues during cleanup/deletion
- statement {
- effect = "Deny"
- actions = ["logs:CreateLogGroup"]
- resources = ["*"]
- }
-}
-
# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index b9de533aa5..db67c6a06e 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -100,9 +100,9 @@ module "eks_managed_node_group" {
| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
-| [desired\_size](#input\_desired\_size) | Desired number of worker nodes | `number` | `1` | no |
+| [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no |
| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
-| [disk\_size](#input\_disk\_size) | Disk size in GiB for worker nodes. Defaults to `20` | `number` | `null` | no |
+| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20` | `number` | `null` | no |
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no |
| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `null` | no |
| [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `null` | no |
@@ -128,9 +128,9 @@ module "eks_managed_node_group" {
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `null` | no |
-| [max\_size](#input\_max\_size) | Maximum number of worker nodes | `number` | `3` | no |
+| [max\_size](#input\_max\_size) | Maximum number of instances/nodes | `number` | `3` | no |
| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | | no |
-| [min\_size](#input\_min\_size) | Minimum number of worker nodes | `number` | `0` | no |
+| [min\_size](#input\_min\_size) | Minimum number of instances/nodes | `number` | `0` | no |
| [name](#input\_name) | Name of the EKS managed node group | `string` | `""` | no |
| [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no |
| [placement](#input\_placement) | The placement of the instance | `map(string)` | `null` | no |
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 47ca3c2991..843cccd4d7 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -243,19 +243,19 @@ variable "subnet_ids" {
}
variable "min_size" {
- description = "Minimum number of worker nodes"
+ description = "Minimum number of instances/nodes"
type = number
default = 0
}
variable "max_size" {
- description = "Maximum number of worker nodes"
+ description = "Maximum number of instances/nodes"
type = number
default = 3
}
variable "desired_size" {
- description = "Desired number of worker nodes"
+ description = "Desired number of instances/nodes"
type = number
default = 1
}
@@ -291,7 +291,7 @@ variable "capacity_type" {
}
variable "disk_size" {
- description = "Disk size in GiB for worker nodes. Defaults to `20`"
+ description = "Disk size in GiB for nodes. Defaults to `20`"
type = number
default = null
}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 2eca9cd9c3..247ab16482 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -148,7 +148,7 @@ module "self_managed_node_group" {
| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
-| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS worker security group"` | no |
+| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS self-managed node group security group"` | no |
| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no |
| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index beda37df0c..bfc4312172 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -235,7 +235,7 @@ resource "aws_launch_template" "this" {
}
# Prevent premature access of security group roles and policies by pods that
- # require permissions on create/destroy that depend on workers.
+ # require permissions on create/destroy that depend on nodes
depends_on = [
aws_security_group_rule.this,
aws_iam_role_policy_attachment.this,
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 7e34fff950..53721cf63a 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -487,7 +487,7 @@ variable "security_group_use_name_prefix" {
variable "security_group_description" {
description = "Description for the security group created"
type = string
- default = "EKS worker security group"
+ default = "EKS self-managed node group security group"
}
variable "vpc_id" {
diff --git a/node_groups.tf b/node_groups.tf
index bcfbb12aa1..988942e393 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -121,7 +121,7 @@ resource "aws_security_group" "node" {
}
resource "aws_security_group_rule" "node" {
- for_each = { for k, v in merge(local.node_security_group_rules, var.node_additional_security_group_rules) : k => v if local.create_node_sg }
+ for_each = { for k, v in merge(local.node_security_group_rules, var.node_security_group_additional_rules) : k => v if local.create_node_sg }
# Required
security_group_id = aws_security_group.node[0].id
diff --git a/variables.tf b/variables.tf
index 43b26aea90..12ff69ee33 100644
--- a/variables.tf
+++ b/variables.tf
@@ -151,7 +151,7 @@ variable "cluster_security_group_description" {
default = "EKS cluster security group"
}
-variable "cluster_additional_security_group_rules" {
+variable "cluster_security_group_additional_rules" {
description = "List of additional security group rules to add to the cluster security group created"
type = map(any)
default = {}
@@ -197,7 +197,7 @@ variable "node_security_group_description" {
default = "EKS node shared security group"
}
-variable "node_additional_security_group_rules" {
+variable "node_security_group_additional_rules" {
description = "List of additional security group rules to add to the node security group created"
type = map(any)
default = {}
From 7652eaf6d9c6631b4d551049034faa797c95968c Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 8 Dec 2021 16:03:22 -0500
Subject: [PATCH 60/83] chore: fix spelling mistakes
---
UPGRADE-18.0.md | 14 +++++++-------
modules/_user_data/README.md | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/UPGRADE-18.0.md b/UPGRADE-18.0.md
index 52a850c7c6..0c6d56dde6 100644
--- a/UPGRADE-18.0.md
+++ b/UPGRADE-18.0.md
@@ -11,14 +11,14 @@ Please consult the `examples` directory for reference example configurations. If
- [AWS EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), `eks_managed_node_groups`, was previously referred to as simply node group, `node_groups`
- [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html), `self_managed_node_groups`, was previously referred to as worker group, `worker_groups`
- [AWS Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html), `fargate_profiles`, remains unchanged in terms of naming and terminology
-- The three different node group types supported by AWS and the module have been refactored into standalone sub-modules that are both used by the root `eks` module as well as available for indiviudal, standalone consumption if desired.
+- The three different node group types supported by AWS and the module have been refactored into standalone sub-modules that are both used by the root `eks` module as well as available for individual, standalone consumption if desired.
- The previous `node_groups` sub-module is now named `eks-managed-node-group` and provisions a single AWS EKS Managed Node Group per sub-module definition (previous version utilized `for_each` to create 0 or more node groups)
- Additional changes for the `eks-managed-node-group` sub-module over the previous `node_groups` module include:
- Variable name changes defined in section `Variable and output changes` below
- Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
- Support for nearly full control of the security group created, or provide the ID of an existing security group, has been added
- User data has been revamped and all user data logic moved to the `_user_data` internal sub-module; the local `userdata.sh.tpl` has been removed entirely
- - The previous `fargate` sub-module is now named `fargate-profile` and provisions a single AWS EKS Fargate Profile per sub-module definition (previous version utilized `for_eadch` to create 0 or more profiles)
+ - The previous `fargate` sub-module is now named `fargate-profile` and provisions a single AWS EKS Fargate Profile per sub-module definition (previous version utilized `for_each` to create 0 or more profiles)
- Additional changes for the `fargate-profile` sub-module over the previous `fargate` module include:
- Variable name changes defined in section `Variable and output changes` below
- Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
@@ -26,7 +26,7 @@ Please consult the `examples` directory for reference example configurations. If
- A sub-module for `self-managed-node-group` has been created and provisions a single self managed node group (autoscaling group) per sub-module definition
- Additional changes for the `self-managed-node-group` sub-module over the previous `node_groups` variable include:
- The underlying autoscaling group and launch template have been updated to more closely match that of the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module and the features it offers
- - The previous iteration used a count over a list of node group definitons which was prone to disruptive updates; this is now replaced with a map/for_each to align with that of the EKS managed node group and Fargate profile behaviors/style
+ - The previous iteration used a count over a list of node group definitions which was prone to disruptive updates; this is now replaced with a map/for_each to align with that of the EKS managed node group and Fargate profile behaviors/style
- The user data configuration supported across the module has been completely revamped. A new `_user_data` internal sub-module has been created to consolidate all user data configuration in one location which provides better support for testability (via the [`examples/user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) example). The new sub-module supports nearly all possible combinations including the ability to allow users to provide their own user data template which will be rendered by the module. See the `examples/user_data` example project for the full plethora of example configuration possibilities; more details on the logic of the design can be found in the [`modules/_user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data_) directory.
## Additional changes
@@ -35,7 +35,7 @@ Please consult the `examples` directory for reference example configurations. If
- Support for AWS EKS Addons has been added
- Support for AWS EKS Cluster Identity Provider Configuration has been added
-- AWS Terraform provider minimum required version has been updated to 3.64 to support the changes made and additional resoruces supported
+- AWS Terraform provider minimum required version has been updated to 3.64 to support the changes made and additional resources supported
- An example `user_data` project has been added to aid in demonstrating, testing, and validating the various methods of configuring user data with the `_user_data` sub-module as well as the root `eks` module
- Template for rendering the aws-auth configmap output - `aws_auth_cm.tpl`
- Template for Bottlerocket OS user data bootstrapping - `bottlerocket_user_data.tpl`
@@ -47,15 +47,15 @@ Please consult the `examples` directory for reference example configurations. If
- The previous `managed_node_groups` example has been renamed to `self_managed_node_group`
- The previously hardcoded EKS OIDC root CA thumbprint value and variable has been replaced with a `tls_certificate` data source that refers to the cluster OIDC issuer url. Thumbprint values should remain unchanged however
- Individual cluster security group resources have been replaced with a single security group resource that takes a map of rules as input. The default ingress/egress rules have had their scope reduced in order to provide the bare minimum of access to permit successful cluster creation and allow users to opt in to any additional network access as needed for a better security posture. This means the `0.0.0.0/0` egress rule has been removed; TCP/443 and TCP/10250 egress rules to the node group security group are used instead
-- The Linux/bash user data template has been updated to include the bareminimum necessary for bootstrapping AWS EKS Optimized AMI derivative nodes with provisions for providing additional user data and configurations; was named `userdata.sh.tpl` and is now named `linux_user_data.tpl`
+- The Linux/bash user data template has been updated to include the bare minimum necessary for bootstrapping AWS EKS Optimized AMI derivative nodes with provisions for providing additional user data and configurations; was named `userdata.sh.tpl` and is now named `linux_user_data.tpl`
- The Windows user data template has been renamed from `userdata_windows.tpl` to `windows_user_data.tpl`
### Removed
-- Miscellaneous documents on how to configure Kubernetes cluster internals have been removed. Docuemntation related to how to configure the AWS EKS Cluster and its supported infrastructure resources provided by the module are supported, while cluster internal configuration is out of scope for this project
+- Miscellaneous documents on how to configure Kubernetes cluster internals have been removed. Documentation related to how to configure the AWS EKS Cluster and its supported infrastructure resources provided by the module is supported, while cluster internal configuration is out of scope for this project
- The previous `bottlerocket` example has been removed in favor of demonstrating the use and configuration of Bottlerocket nodes via the respective `eks_managed_node_group` and `self_managed_node_group` examples
- The previous `launch_template` and `launch_templates_with_managed_node_groups` examples have been removed; only launch templates are now supported (default) and launch configuration support has been removed
-- The previous `secrets_encryption` example has been removed; the functionality has been demonstrated in several of the new examples rendering this standlone example redundant
+- The previous `secrets_encryption` example has been removed; the functionality has been demonstrated in several of the new examples rendering this standalone example redundant
- The additional, custom IAM role policy for the cluster role has been removed. The permissions are either now provided in the attached managed AWS permission policies used or are no longer required
- The `kubeconfig.tpl` template; kubeconfig management is no longer supported under this module
- The HTTP Terraform provider (forked copy) dependency has been removed
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 82d827a444..b24ec94026 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -15,7 +15,7 @@ At a high level, AWS EKS users have two methods for launching nodes within this
When using an EKS managed node group, users have 2 primary routes for interacting with the bootstrap user data:
-1. If the EKS managed node group does **NOT** utilize a custom AMI, then users can elect to supply additional user data that is prepended before the EKS managed node group bootstrap user data. You can read more about this process from the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
+1. If the EKS managed node group does **NOT** utilize a custom AMI, then users can elect to supply additional user data that is pre-pended before the EKS managed node group bootstrap user data. You can read more about this process from the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
- Users can use the following variables to facilitate this process:
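For illustration only — a minimal sketch of supplying such prepended user data through the module, assuming the `pre_bootstrap_user_data` input used elsewhere in this module's examples; the node group name and shell content are placeholders:

```hcl
eks_managed_node_groups = {
  example = {
    # Merged ahead of the EKS managed node group bootstrap user data
    # (only applies when the node group does NOT use a custom AMI)
    pre_bootstrap_user_data = <<-EOT
      export CONTAINER_RUNTIME="containerd"
    EOT
  }
}
```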
From 293ba7d31eaa6485a14e19391cbf13576ef4a524 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 8 Dec 2021 16:05:08 -0500
Subject: [PATCH 61/83] chore: remove `todo` blocks before finishing
---
README.md | 2 --
modules/_user_data/README.md | 2 --
2 files changed, 4 deletions(-)
diff --git a/README.md b/README.md
index 3748f47674..85143018d3 100644
--- a/README.md
+++ b/README.md
@@ -556,8 +556,6 @@ The security groups created by this module are depicted in the image shown below
-
-
## Notes
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index b24ec94026..995071c155 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -72,8 +72,6 @@ The rough flow of logic that is encapsulated within the `_user_data` internal mo
-
-
From 80092aae964929b9ba15439f22b21bfcf7fc47f8 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Thu, 9 Dec 2021 09:41:21 -0500
Subject: [PATCH 62/83] fix: add cluster ca data and endpoint to windows user
data scripts
---
examples/user_data/templates/windows_custom.tpl | 2 +-
templates/windows_user_data.tpl | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/user_data/templates/windows_custom.tpl b/examples/user_data/templates/windows_custom.tpl
index ab64984135..3c1ca7014a 100644
--- a/examples/user_data/templates/windows_custom.tpl
+++ b/examples/user_data/templates/windows_custom.tpl
@@ -4,7 +4,7 @@ ${pre_bootstrap_user_data ~}
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
-& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -APIServerEndpoint ${cluster_endpoint} -Base64ClusterCA ${cluster_auth_base64} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
${post_bootstrap_user_data ~}
diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl
index 47b2feca70..5000850604 100644
--- a/templates/windows_user_data.tpl
+++ b/templates/windows_user_data.tpl
@@ -3,7 +3,7 @@ ${pre_bootstrap_user_data ~}
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
-& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -APIServerEndpoint ${cluster_endpoint} -Base64ClusterCA ${cluster_auth_base64} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
${post_bootstrap_user_data ~}
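As a sanity check on the arguments added above, a hypothetical standalone rendering of the template with Terraform's built-in `templatefile` function — every value shown is a placeholder, not real cluster data:

```hcl
locals {
  windows_user_data = templatefile("${path.module}/templates/windows_user_data.tpl", {
    cluster_name             = "example"                                         # placeholder
    cluster_endpoint         = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com" # placeholder
    cluster_auth_base64      = "LS0tLS1CRUdJTi4uLg=="                            # placeholder CA data
    bootstrap_extra_args     = ""
    pre_bootstrap_user_data  = ""
    post_bootstrap_user_data = ""
  })
}
```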
From 9c4c52dc28682511cdb1383a0fa0cc45c7dfb905 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:26:42 -0500
Subject: [PATCH 63/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 85143018d3..d93dac8c1b 100644
--- a/README.md
+++ b/README.md
@@ -342,7 +342,7 @@ Users are free to mix and match the different node group types that meet their n
- AWS EKS Cluster with one or more Fargate profiles
- AWS EKS Cluster with one or more AWS EKS Managed Node Groups, one or more Self Managed Node Groups, one or more Fargate profiles
-It is also possible to have the various node groups of each family configured differently, as well as externally defined (outside of the root `eks` module definition) node groups using the provided sub-modules attached to the cluster created - there are no restrictions on the the various different possibilities provided by the module.
+It is also possible to configure the various node groups of each family differently. Node groups may also be defined outside of the root `eks` module definition by using the provided sub-modules. There are no restrictions on the various possibilities provided by the module.
```hcl
self_managed_node_group_defaults = {
From 2d3ae43dbae4bf86433d89c2d979345381dd7035 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:26:53 -0500
Subject: [PATCH 64/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index d93dac8c1b..ac182400ff 100644
--- a/README.md
+++ b/README.md
@@ -468,7 +468,7 @@ See the [`examples/complete/` example](https://github.com/terraform-aws-modules/
### Default configurations
-Each node group type (EKS managed node group, self managed node group, Fargate profile) provides a default configuration setting that allows users to provide their own default configurations instead of the modules default configurations. This allows users to set a common set of defaults for their node groups and still maintain the ability to override these settings within the specific node group definition. The order of precedence for each node group type roughly follows (from highest precedence, to least):
+Each node group type (EKS managed node group, self managed node group, or Fargate profile) provides a default configuration setting that allows users to provide their own default configuration instead of the module's default configuration. This allows users to set a common set of defaults for their node groups and still maintain the ability to override these settings within the specific node group definition. The order of precedence for each node group type roughly follows (from highest to least precedence):
- Node group individual configuration
- Node group family default configuration
- Module default configuration
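A small sketch of that precedence in practice (attribute names follow the module's documented inputs; values are illustrative):

```hcl
eks_managed_node_group_defaults = {
  instance_types = ["m6i.large"] # family default
  disk_size      = 50
}

eks_managed_node_groups = {
  default = {} # inherits the family defaults above

  memory_optimized = {
    # individual configuration takes precedence over the family default
    instance_types = ["r6i.large"]
  }
}
```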
From d70b7645ac6fdf7ceefe7d4439b0864071780c86 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:27:01 -0500
Subject: [PATCH 65/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index ac182400ff..06b71c8128 100644
--- a/README.md
+++ b/README.md
@@ -517,7 +517,7 @@ For example, the following creates 4 AWS EKS Managed Node Groups:
While the module is designed to be flexible and support as many use cases and configurations as possible, there is a limit to what first class support can be provided without over-burdening the complexity of the module. Below is a list of general notes on the design intent captured by this module which hopefully explains some of the decisions that are, or will be made, in terms of what is added/supported natively by the module:
-- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constructs and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also very much fully supported by this module. This module does not make any attempt to NOT support Windows, as in preventing the usage of Windows based nodes, however it is up to users to put in additional effort in order to operate Windows based nodes when using the module. User can refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What does this mean:
+- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constructs and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also very much fully supported by this module. This module does not make any attempt to NOT support Windows, as in preventing the usage of Windows based nodes, however it is up to users to put in additional effort in order to operate Windows based nodes when using the module. Users can refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What this means is:
- AWS EKS Managed Node Groups default to `linux` as the `platform`, but `bottlerocket` is also supported by AWS (`windows` is not supported by AWS EKS Managed Node groups)
- AWS Self Managed Node Groups also default to `linux` and the default AMI used is the latest AMI for the selected Kubernetes version. If you wish to use a different OS or AMI then you will need to opt in to the necessary configurations to ensure the correct AMI is used in conjunction with the necessary user data to ensure the nodes are launched and joined to your cluster successfully.
- AWS EKS Managed Node groups are current the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node groups provide a better user experience and offer a more "managed service" experience and therefore has precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to rollout additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting added feature support for AWS EKS Managed Node groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
From c42025f9b1bc32742e1a3822ada965485fecb3ea Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:27:11 -0500
Subject: [PATCH 66/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 06b71c8128..697b21aa98 100644
--- a/README.md
+++ b/README.md
@@ -520,7 +520,7 @@ While the module is designed to be flexible and support as many use cases and co
- Despite the addition of Windows Subsystem for Linux (WSL for short), containerization technology is very much a suite of Linux constructs and therefore Linux is the primary OS supported by this module. In addition, due to the first class support provided by AWS, Bottlerocket OS and Fargate Profiles are also very much fully supported by this module. This module does not make any attempt to NOT support Windows, as in preventing the usage of Windows based nodes, however it is up to users to put in additional effort in order to operate Windows based nodes when using the module. Users can refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) for further details. What this means is:
- AWS EKS Managed Node Groups default to `linux` as the `platform`, but `bottlerocket` is also supported by AWS (`windows` is not supported by AWS EKS Managed Node groups)
- AWS Self Managed Node Groups also default to `linux` and the default AMI used is the latest AMI for the selected Kubernetes version. If you wish to use a different OS or AMI then you will need to opt in to the necessary configurations to ensure the correct AMI is used in conjunction with the necessary user data to ensure the nodes are launched and joined to your cluster successfully.
-- AWS EKS Managed Node groups are current the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node groups provide a better user experience and offer a more "managed service" experience and therefore has precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to rollout additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting added feature support for AWS EKS Managed Node groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
+- AWS EKS Managed Node groups are currently the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node groups provide a better user experience and offer a more "managed service" experience and therefore have precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to roll out additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting added feature support for AWS EKS Managed Node groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
- Due to the plethora of tooling and different manners of configuring your cluster, cluster configuration is intentionally left out of the module in order to simplify the module for a broader user base. Previous module versions provided support for managing the aws-auth configmap via the Kubernetes Terraform provider using the now deprecated aws-iam-authenticator; these are no longer included in the module. This module strictly focuses on the infrastructure resources to provision an EKS cluster as well as any supporting AWS resources - how the internals of the cluster are configured and managed is up to users and is outside the scope of this module. There is an output attribute, `aws_auth_configmap_yaml`, that has been provided that can be useful to help bridge this transition. Please see the various examples provided where this attribute is used to ensure that self managed node groups or external node groups have their IAM roles appropriately mapped to the aws-auth configmap. How users elect to manage the aws-auth configmap is left up to their choosing.
### User Data & Bootstrapping
From 96210c06a5531141b3c253097470dd0dd5ec5bfc Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:30:51 -0500
Subject: [PATCH 67/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 697b21aa98..f1f58fc0fb 100644
--- a/README.md
+++ b/README.md
@@ -585,7 +585,7 @@ You have to interact with the cluster from within the VPC that it is associated
How can I stop Terraform from removing the EKS tags from my VPC and subnets?
-You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
+You need to add the tags to the Terraform definition of the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore_tags-configuration-block). However this can also cause terraform to display a perpetual difference.
From ab0e21f195b9ac19f25cb7258cbd6586aa78006d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:31:01 -0500
Subject: [PATCH 68/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index f1f58fc0fb..db9d68f2e4 100644
--- a/README.md
+++ b/README.md
@@ -615,7 +615,7 @@ If you are using the cluster autoscaler:
- Repeat until all old nodes are drained
- Cluster autoscaler will terminate the old nodes after 10-60 minutes automatically
-You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
+You can also use a third-party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
How can I use Windows workers?
From 2d79144e9ed7ec384a9ee58b54c633c9dbda6852 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:31:11 -0500
Subject: [PATCH 69/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index db9d68f2e4..9bd9a3e7a9 100644
--- a/README.md
+++ b/README.md
@@ -621,7 +621,7 @@ You can also use a third-party tool like Gruntwork's kubergrunt. See the [`eks d
To enable Windows support for your EKS cluster, you should apply some configs manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
-Windows based nodes require additional cluster role (`eks:kube-proxy-windows`).
+Windows based nodes require an additional cluster role (`eks:kube-proxy-windows`).
Worker nodes with labels do not join a 1.16+ cluster
From 37b10aaf657620ceab376f7f0cff4231abfdadaa Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:31:25 -0500
Subject: [PATCH 70/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 9bd9a3e7a9..a9dec593f9 100644
--- a/README.md
+++ b/README.md
@@ -619,7 +619,7 @@ You can also use a third-party tool like Gruntwork's kubergrunt. See the [`eks d
How can I use Windows workers?
-To enable Windows support for your EKS cluster, you should apply some configs manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
+To enable Windows support for your EKS cluster, you should apply some configuration manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
Windows based nodes require an additional cluster role (`eks:kube-proxy-windows`).
From 742335e6338849bee9261a08f27d490f78146c3d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:31:42 -0500
Subject: [PATCH 71/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a9dec593f9..c08338aa72 100644
--- a/README.md
+++ b/README.md
@@ -625,7 +625,7 @@ Windows based nodes require an additional cluster role (`eks:kube-proxy-windows`
Worker nodes with labels do not join a 1.16+ cluster
-Kubelet restricts the allowed list of labels in the `kubernetes.io` namespace that can be applied to nodes starting in 1.16. Older configurations used labels such as `kubernetes.io/lifecycle=spot` which is no longer allowed; instead, use `node.kubernetes.io/lifecycle=spot`
+As of Kubernetes 1.16, kubelet restricts which labels with names in the `kubernetes.io` namespace can be applied to nodes. Labels such as `kubernetes.io/lifecycle=spot` are no longer allowed; instead use `node.kubernetes.io/lifecycle=spot`
Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
From d40dd46f91034e739cdf859a52906e20e4e9af8c Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:31:51 -0500
Subject: [PATCH 72/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index c08338aa72..9e6fe5fd7a 100644
--- a/README.md
+++ b/README.md
@@ -627,7 +627,7 @@ Windows based nodes require an additional cluster role (`eks:kube-proxy-windows`
As of Kubernetes 1.16, kubelet restricts which labels with names in the `kubernetes.io` namespace can be applied to nodes. Labels such as `kubernetes.io/lifecycle=spot` are no longer allowed; instead use `node.kubernetes.io/lifecycle=spot`
-Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
+See your Kubernetes version's documentation for the `--node-labels` kubelet flag for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
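One hypothetical way to pass an allowed label through this module is via the `bootstrap_extra_args` input shown elsewhere in this document; the kubelet flag itself is the documented `--node-labels`:

```hcl
self_managed_node_groups = {
  spot = {
    # node.kubernetes.io/ is an allowed label namespace as of 1.16
    bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
  }
}
```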
From 6423b701c694c9383f0b1cbeea9eb0ff93677c70 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 13:34:18 -0500
Subject: [PATCH 73/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 9e6fe5fd7a..8e52515e89 100644
--- a/README.md
+++ b/README.md
@@ -521,7 +521,7 @@ While the module is designed to be flexible and support as many use cases and co
- AWS EKS Managed Node Groups default to `linux` as the `platform`, but `bottlerocket` is also supported by AWS (`windows` is not supported by AWS EKS Managed Node groups)
- AWS Self Managed Node Groups also default to `linux` and the default AMI used is the latest AMI for the selected Kubernetes version. If you wish to use a different OS or AMI then you will need to opt in to the necessary configurations to ensure the correct AMI is used in conjunction with the necessary user data to ensure the nodes are launched and joined to your cluster successfully.
- AWS EKS Managed Node groups are currently the preferred route over Self Managed Node Groups for compute nodes. Both operate very similarly - both are backed by autoscaling groups and launch templates deployed and visible within your account. However, AWS EKS Managed Node groups provide a better user experience and offer a more "managed service" experience and therefore have precedence over Self Managed Node Groups. That said, there are currently inherent limitations as AWS continues to roll out additional feature support similar to the level of customization you can achieve with Self Managed Node Groups. When requesting added feature support for AWS EKS Managed Node groups, please ensure you have verified that the feature(s) are 1) supported by AWS and 2) supported by the Terraform AWS provider before submitting a feature request.
-- Due to the plethora of tooling and different manners of configuring your cluster, cluster configuration is intentionally left out of the module in order to simplify the module for a broader user base. Previous module versions provided support for managing the aws-auth configmap via the Kubernetes Terraform provider using the now deprecated aws-iam-authenticator; these are no longer included in the module. This module strictly focuses on the infrastructure resources to provision an EKS cluster as well as any supporting AWS resources - how the internals of the cluster are configured and managed is up to users and is outside the scope of this module. There is an output attribute, `aws_auth_configmap_yaml`, that has been provided that can be useful to help bridge this transition. Please see the various examples provided where this attribute is used to ensure that self managed node groups or external node groups have their IAM roles appropriately mapped to the aws-auth configmap. How users elect to manage the aws-auth configmap is left up to their choosing.
+- Due to the plethora of tooling and different manners of configuring your cluster, cluster configuration is intentionally left out of the module in order to simplify the module for a broader user base. Previous module versions provided support for managing the aws-auth configmap via the Kubernetes Terraform provider using the now deprecated aws-iam-authenticator; these are no longer included in the module. This module strictly focuses on the infrastructure resources to provision an EKS cluster as well as any supporting AWS resources. How the internals of the cluster are configured and managed is up to users and is outside the scope of this module. There is an output attribute, `aws_auth_configmap_yaml`, that has been provided that can be useful to help bridge this transition. Please see the various examples provided where this attribute is used to ensure that self managed node groups or external node groups have their IAM roles appropriately mapped to the aws-auth configmap. How users elect to manage the aws-auth configmap is left up to their choosing.
### User Data & Bootstrapping
From 7b72c07638d0f79d57713ce564dbfda04ea2df99 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 15:13:54 -0500
Subject: [PATCH 74/83] Update modules/eks-managed-node-group/variables.tf
Co-authored-by: John Gardiner Myers
---
modules/eks-managed-node-group/variables.tf | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 843cccd4d7..579dc16412 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -73,9 +73,9 @@ variable "user_data_template_path" {
################################################################################
variable "create_launch_template" {
- description = "Determines whether to create a launch template or not. By default, EKS will use its own default launch template"
+ description = "Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template"
type = bool
- default = false
+ default = true
}
variable "launch_template_name" {
From fa6b8e45351eed1bebaf4b173f9cbc0a13bf9fc7 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 15:14:04 -0500
Subject: [PATCH 75/83] Update README.md
Co-authored-by: John Gardiner Myers
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index 8e52515e89..80f623699e 100644
--- a/README.md
+++ b/README.md
@@ -617,6 +617,7 @@ If you are using the cluster autoscaler:
You can also use a third-party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
+Alternatively, use a managed node group.
How can I use Windows workers?
To enable Windows support for your EKS cluster, you should apply some configuration manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
From 7e2462ec2003cb5af8801a532426c8b2036cf7b7 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 10 Dec 2021 15:14:12 -0500
Subject: [PATCH 76/83] Update README.md
Co-authored-by: John Gardiner Myers
---
.pre-commit-config.yaml | 4 ++++
README.md | 2 +-
modules/eks-managed-node-group/README.md | 2 +-
3 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 75d403e724..c86ca693aa 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,7 @@
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
<<<<<<< HEAD
+<<<<<<< HEAD
<<<<<<< HEAD
rev: v1.57.0
=======
@@ -9,6 +10,9 @@ repos:
=======
rev: v1.59.0
>>>>>>> fb3eb35 (chore: remove karpenter, back to instance refresh and node termination handler)
+=======
+ rev: v1.60.0
+>>>>>>> 5195f78 (chore: update docs for new default)
hooks:
- id: terraform_fmt
- id: terraform_validate
diff --git a/README.md b/README.md
index 80f623699e..26b8962d43 100644
--- a/README.md
+++ b/README.md
@@ -597,7 +597,7 @@ If you are not using autoscaling and want to control the number of nodes via ter
Why are nodes not recreated when the `launch_template` is recreated?
-By default the ASG is not configured to be recreated when the launch configuration or template changes; you will need to use a process to drain and cycle the nodes.
+By default the ASG for a self-managed node group is not configured to be recreated when the launch configuration or template changes; you will need to use a process to drain and cycle the nodes.
If you are NOT using the cluster autoscaler:
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index db67c6a06e..823056e781 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -96,7 +96,7 @@ module "eks_managed_node_group" {
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. By default, EKS will use its own default launch template | `bool` | `false` | no |
+| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
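One way to have self managed nodes cycle automatically when their launch template changes is an autoscaling group instance refresh; the sketch below mirrors the `instance_refresh` block that appears in the `irsa_autoscale_refresh` example later in this series, with illustrative values:

```hcl
self_managed_node_groups = {
  refresh = {
    instance_refresh = {
      strategy = "Rolling"
      preferences = {
        min_healthy_percentage = 66
      }
    }
  }
}
```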
From bd0f8ca38529c3d9a242a9fe2b39d6c84990260d Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sat, 11 Dec 2021 13:04:13 -0500
Subject: [PATCH 77/83] Update README.md
Co-authored-by: John Gardiner Myers
---
.pre-commit-config.yaml | 4 ++++
README.md | 15 +--------------
examples/complete/main.tf | 10 ++--------
examples/eks_managed_node_group/main.tf | 14 +-------------
examples/irsa_autoscale_refresh/main.tf | 10 +---------
examples/self_managed_node_group/main.tf | 3 +--
modules/eks-managed-node-group/README.md | 2 +-
modules/eks-managed-node-group/main.tf | 2 +-
modules/eks-managed-node-group/variables.tf | 2 +-
modules/self-managed-node-group/README.md | 3 +--
modules/self-managed-node-group/main.tf | 2 +-
modules/self-managed-node-group/variables.tf | 2 +-
node_groups.tf | 18 +++++++++---------
13 files changed, 25 insertions(+), 62 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c86ca693aa..ed135ecb6f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,6 +2,7 @@ repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
<<<<<<< HEAD
<<<<<<< HEAD
+<<<<<<< HEAD
<<<<<<< HEAD
rev: v1.57.0
=======
@@ -13,6 +14,9 @@ repos:
=======
rev: v1.60.0
>>>>>>> 5195f78 (chore: update docs for new default)
+=======
+ rev: v1.61.0
+>>>>>>> 7664f78 (fix: update usage of launch template creation and references)
hooks:
- id: terraform_fmt
- id: terraform_validate
diff --git a/README.md b/README.md
index 26b8962d43..f34eab25ad 100644
--- a/README.md
+++ b/README.md
@@ -104,7 +104,6 @@ module "eks" {
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
vpc_security_group_ids = [aws_security_group.additional.id]
- create_launch_template = true
}
eks_managed_node_groups = {
@@ -176,7 +175,7 @@ module "eks" {
ℹ️ Only the pertinent attributes are shown for brevity
-1. By default, the `eks-managed-node-group` sub-module will use the default configurations provided by AWS EKS Managed Node Groups; EKS MNG will provide its own launch template and utilize the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
+1. An AWS EKS Managed Node Group can provide its own launch template and utilize the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version:
```hcl
eks_managed_node_groups = {
@@ -200,8 +199,6 @@ module "eks" {
```hcl
eks_managed_node_groups = {
extend_config = {
- create_launch_template = true
-
# This is supplied to the AWS EKS Optimized AMI
# bootstrap script https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh
bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
@@ -221,13 +218,9 @@ module "eks" {
```hcl
eks_managed_node_groups = {
bottlerocket_extend_config = {
- create_launch_template = true
-
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
- create_launch_template = true
-
# this will get added to what AWS provides
bootstrap_extra_args = <<-EOT
# extra args added
@@ -243,8 +236,6 @@ module "eks" {
```hcl
eks_managed_node_groups = {
custom_ami = {
- create_launch_template = true
-
ami_id = "ami-0caf35bc73450c396"
# By default, EKS managed node groups will not append bootstrap script;
@@ -276,8 +267,6 @@ module "eks" {
ami_id = "ami-0ff61e0bcfc81dc94"
platform = "bottlerocket"
- create_launch_template = true
-
# use module user data template to bootstrap
enable_bootstrap_user_data = true
# this will get added to the template
@@ -400,7 +389,6 @@ It is also possible to configure the various node groups of each family differen
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
vpc_security_group_ids = [aws_security_group.additional.id]
- create_launch_template = true
}
eks_managed_node_groups = {
@@ -485,7 +473,6 @@ For example, the following creates 4 AWS EKS Managed Node Groups:
ami_type = "AL2_x86_64"
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
- create_launch_template = true
}
eks_managed_node_groups = {
diff --git a/examples/complete/main.tf b/examples/complete/main.tf
index 98691b2e5d..ff32473737 100644
--- a/examples/complete/main.tf
+++ b/examples/complete/main.tf
@@ -54,9 +54,6 @@ module "eks" {
self_managed_node_groups = {
spot = {
- create_launch_template = true
- launch_template_name = "spot"
-
instance_type = "m5.large"
instance_market_options = {
market_type = "spot"
@@ -84,7 +81,6 @@ module "eks" {
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
vpc_security_group_ids = [aws_security_group.additional.id]
- create_launch_template = true
}
eks_managed_node_groups = {
@@ -176,6 +172,8 @@ module "self_managed_node_group" {
cluster_endpoint = module.eks.cluster_endpoint
cluster_auth_base64 = module.eks.cluster_certificate_authority_data
+ instance_type = "m5.large"
+
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
vpc_security_group_ids = [
@@ -183,10 +181,6 @@ module "self_managed_node_group" {
module.eks.cluster_security_group_id,
]
- create_launch_template = true
- launch_template_name = "separate-self-mng"
- instance_type = "m5.large"
-
tags = merge(local.tags, { Separate = "self-managed-node-group" })
}
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 11740af4cb..83db0fe3a9 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -69,9 +69,6 @@ module "eks" {
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
-
# this will get added to what AWS provides
bootstrap_extra_args = <<-EOT
# extra args added
@@ -86,9 +83,6 @@ module "eks" {
ami_id = "ami-0ff61e0bcfc81dc94"
platform = "bottlerocket"
- create_launch_template = true
- launch_template_name = "bottlerocket-custom"
-
# use module user data template to bootstrap
enable_bootstrap_user_data = true
# this will get added to the template
@@ -116,9 +110,6 @@ module "eks" {
# Use a custom AMI
custom_ami = {
- create_launch_template = true
- launch_template_name = "custom-ami"
-
# Current default AMI used by managed node groups - pseudo "custom"
ami_id = "ami-0caf35bc73450c396"
@@ -174,10 +165,7 @@ module "eks" {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
- create_launch_template = true
- launch_template_name = "eks-managed-ex"
- launch_template_use_name_prefix = true
- description = "EKS managed node group example launch template"
+ description = "EKS managed node group example launch template"
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
diff --git a/examples/irsa_autoscale_refresh/main.tf b/examples/irsa_autoscale_refresh/main.tf
index 822186e94b..9e74e3d9fe 100644
--- a/examples/irsa_autoscale_refresh/main.tf
+++ b/examples/irsa_autoscale_refresh/main.tf
@@ -43,9 +43,7 @@ module "eks" {
max_size = 5
desired_size = 1
- instance_type = "m5.large"
- create_launch_template = true
- launch_template_name = "refresh"
+ instance_type = "m5.large"
instance_refresh = {
strategy = "Rolling"
@@ -66,9 +64,6 @@ module "eks" {
}
mixed_instance = {
- create_launch_template = true
- launch_template_name = "mixed-instance"
-
use_mixed_instances_policy = true
mixed_instances_policy = {
instances_distribution = {
@@ -97,9 +92,6 @@ module "eks" {
}
spot = {
- create_launch_template = true
- launch_template_name = "spot"
-
instance_type = "m5.large"
instance_market_options = {
market_type = "spot"
diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf
index 9ce16c674d..73b502520c 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/examples/self_managed_node_group/main.tf
@@ -111,10 +111,9 @@ module "eks" {
disk_size = 256
instance_type = "m6i.large"
- create_launch_template = true
launch_template_name = "self-managed-ex"
launch_template_use_name_prefix = true
- description = "Self managed node group example launch template"
+ launch_template_description = "Self managed node group example launch template"
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 823056e781..f339fcc94a 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -99,7 +99,6 @@ module "eks_managed_node_group" {
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
-| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
| [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no |
| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20` | `number` | `null` | no |
@@ -124,6 +123,7 @@ module "eks_managed_node_group" {
| [key\_name](#input\_key\_name) | The key name that should be used for the instance(s) | `string` | `null` | no |
| [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default version of the launch template | `string` | `null` | no |
+| [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `""` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index 85244506dd..e93727f82c 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -35,7 +35,7 @@ resource "aws_launch_template" "this" {
name = var.launch_template_use_name_prefix ? null : local.launch_template_name_int
name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name_int}-" : null
- description = var.description
+ description = var.launch_template_description
ebs_optimized = var.ebs_optimized
image_id = var.ami_id
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 579dc16412..360071cc05 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -90,7 +90,7 @@ variable "launch_template_use_name_prefix" {
default = true
}
-variable "description" {
+variable "launch_template_description" {
description = "Description of the launch template"
type = string
default = null
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 247ab16482..10a3068825 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -24,7 +24,6 @@ module "self_managed_node_group" {
max_size = 10
desired_size = 1
- create_launch_template = true
launch_template_name = "separate-self-mng"
instance_type = "m5.large"
@@ -96,7 +95,6 @@ module "self_managed_node_group" {
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `null` | no |
| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no |
| [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no |
-| [description](#input\_description) | Description of the launch template | `string` | `null` | no |
| [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no |
| [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
@@ -126,6 +124,7 @@ module "self_managed_node_group" {
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default Version of the launch template | `string` | `null` | no |
+| [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index bfc4312172..2acaa3d199 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -47,7 +47,7 @@ resource "aws_launch_template" "this" {
name = var.launch_template_use_name_prefix ? null : local.launch_template_name_int
name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name_int}-" : null
- description = var.description
+ description = var.launch_template_description
ebs_optimized = var.ebs_optimized
image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id)
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index 53721cf63a..d5fe48dec5 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -84,7 +84,7 @@ variable "launch_template_use_name_prefix" {
default = true
}
-variable "description" {
+variable "launch_template_description" {
description = "Description of the launch template"
type = string
default = null
diff --git a/node_groups.tf b/node_groups.tf
index 988942e393..00d7573fab 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -221,11 +221,11 @@ module "eks_managed_node_group" {
user_data_template_path = try(each.value.user_data_template_path, var.eks_managed_node_group_defaults.user_data_template_path, "")
# Launch Template
- create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, false)
- launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, "")
+ create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true)
+ launch_template_name = try(each.value.launch_template_name, each.key)
launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true)
launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null)
- description = try(each.value.description, var.eks_managed_node_group_defaults.description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group")
+ launch_template_description = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group")
ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null)
key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null)
@@ -288,10 +288,8 @@ module "self_managed_node_group" {
name = try(each.value.name, each.key)
use_name_prefix = try(each.value.use_name_prefix, var.self_managed_node_group_defaults.use_name_prefix, true)
- launch_template_name = try(each.value.launch_template_name, each.key)
- launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null)
- availability_zones = try(each.value.availability_zones, var.self_managed_node_group_defaults.availability_zones, null)
- subnet_ids = try(each.value.subnet_ids, var.self_managed_node_group_defaults.subnet_ids, var.subnet_ids)
+ availability_zones = try(each.value.availability_zones, var.self_managed_node_group_defaults.availability_zones, null)
+ subnet_ids = try(each.value.subnet_ids, var.self_managed_node_group_defaults.subnet_ids, var.subnet_ids)
min_size = try(each.value.min_size, var.self_managed_node_group_defaults.min_size, 0)
max_size = try(each.value.max_size, var.self_managed_node_group_defaults.max_size, 3)
@@ -337,8 +335,10 @@ module "self_managed_node_group" {
user_data_template_path = try(each.value.user_data_template_path, var.self_managed_node_group_defaults.user_data_template_path, "")
# Launch Template
- create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
- description = try(each.value.description, var.self_managed_node_group_defaults.description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group")
+ create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
+ launch_template_name = try(each.value.launch_template_name, each.key)
+ launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null)
+ launch_template_description = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group")
ebs_optimized = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null)
ami_id = try(each.value.ami_id, var.self_managed_node_group_defaults.ami_id, "")
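The `try()` chains above are what implement the node group precedence described earlier (individual group, then family defaults, then a module-level fallback); an isolated illustration of the pattern with made-up locals:

```hcl
locals {
  group      = { launch_template_description = "per-group value" }
  family_def = {} # family defaults with no description set

  # try() returns the first expression that does not error, so a missing
  # attribute simply falls through to the next candidate
  description = try(
    local.group.launch_template_description,      # 1. individual configuration
    local.family_def.launch_template_description, # 2. family defaults
    "module default description",                 # 3. module fallback
  )
  # => "per-group value"
}
```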
From 6563328127968a6ab38e19efa0372e553365fc51 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Sat, 11 Dec 2021 13:25:29 -0500
Subject: [PATCH 78/83] chore: add comment on two possible modes if of issues
connecting
---
README.md | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index f34eab25ad..40b9b00892 100644
--- a/README.md
+++ b/README.md
@@ -553,7 +553,11 @@ The security groups created by this module are depicted in the image shown below
Why are nodes not being registered?
-Often an issue caused by a networking or endpoint mis-configuration. At least one of the cluster public or private endpoints must be enabled to access the cluster to work. If you require a public endpoint, setting up both (public and private) and restricting the public endpoint via setting `cluster_endpoint_public_access_cidrs` is recommended. More info regarding communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
+Often this is caused by one of two issues:
+1. Networking or endpoint misconfiguration.
+2. Permissions (IAM/RBAC).
+
+At least one of the cluster endpoints (public or private) must be enabled in order to access the cluster. If you require a public endpoint, setting up both (public and private) and restricting the public endpoint by setting `cluster_endpoint_public_access_cidrs` is recommended. More info regarding communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
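For reference, a hedged sketch of the recommended endpoint setup using this module's inputs (the VPC/subnet IDs and CIDR range below are placeholders):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "example"
  cluster_version = "1.21"

  # Enable both endpoints, but restrict who can reach the public one
  cluster_endpoint_private_access      = true
  cluster_endpoint_public_access       = true
  cluster_endpoint_public_access_cidrs = ["203.0.113.0/24"] # placeholder; use your own ranges

  vpc_id     = "vpc-1234556abcdef"                    # placeholder
  subnet_ids = ["subnet-abcde012", "subnet-bcde012a"] # placeholders
}
```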
From 61c0ce20b4db84ec17ddca74a05535654dbd7fe5 Mon Sep 17 00:00:00 2001
From: Anton Babenko
Date: Sun, 12 Dec 2021 21:49:57 +0100
Subject: [PATCH 79/83] chore: Added action to validate PR title (#1715)
---
.github/workflows/pr-title.yml | 53 ++++++++++++++++++++++++++++++++++
1 file changed, 53 insertions(+)
create mode 100644 .github/workflows/pr-title.yml
diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml
new file mode 100644
index 0000000000..7f5aec08ed
--- /dev/null
+++ b/.github/workflows/pr-title.yml
@@ -0,0 +1,53 @@
+name: "Validate PR title"
+
+on:
+ pull_request_target:
+ types:
+ - opened
+ - edited
+ - synchronize
+
+jobs:
+ main:
+ name: Validate PR title
+ runs-on: ubuntu-latest
+ steps:
+ # Please look up the latest version from
+ # https://github.com/amannn/action-semantic-pull-request/releases
+ - uses: amannn/action-semantic-pull-request@v3.4.6
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ # Configure which types are allowed.
+ # Default: https://github.com/commitizen/conventional-commit-types
+ types: |
+ fix
+ feat
+ docs
+ ci
+ chore
+ BREAKING CHANGE
+ # Configure that a scope must always be provided.
+ requireScope: false
+ # Configure additional validation for the subject based on a regex.
+ # This example ensures the subject starts with an uppercase character.
+ subjectPattern: ^[A-Z].+$
+ # If `subjectPattern` is configured, you can use this property to override
+ # the default error message that is shown when the pattern doesn't match.
+ # The variables `subject` and `title` can be used within the message.
+ subjectPatternError: |
+ The subject "{subject}" found in the pull request title "{title}"
+ didn't match the configured pattern. Please ensure that the subject
+ starts with an uppercase character.
+ # For work-in-progress PRs you can typically use draft pull requests
+ # from Github. However, private repositories on the free plan don't have
+ # this option and therefore this action allows you to opt-in to using the
+ # special "[WIP]" prefix to indicate this state. This will avoid the
+ # validation of the PR title and the pull request checks remain pending.
+ # Note that a second check will be reported if this is enabled.
+ wip: true
+ # When using "Squash and merge" on a PR with only one commit, GitHub
+ # will suggest using that commit message instead of the PR title for the
+ # merge commit, and it's easy to commit this by mistake. Enable this option
+ # to also validate the commit message for one commit PRs.
+ validateSingleCommit: false
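With this configuration, a conforming PR title uses one of the allowed types and a subject that starts with an uppercase character, for example:

```text
fix: Correct the node security group description
feat: Add support for custom launch template versions
chore: Bump pre-commit hook versions
```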
From ea9c7942f34c3fdcfd6118d348ae2cbe7ab5a225 Mon Sep 17 00:00:00 2001
From: Anton Babenko
Date: Sun, 12 Dec 2021 21:55:51 +0100
Subject: [PATCH 80/83] chore: Updated allowed types for PR title (#1716)
---
.github/workflows/pr-title.yml | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml
index 7f5aec08ed..4b16bf7a38 100644
--- a/.github/workflows/pr-title.yml
+++ b/.github/workflows/pr-title.yml
@@ -20,13 +20,13 @@ jobs:
with:
# Configure which types are allowed.
# Default: https://github.com/commitizen/conventional-commit-types
- types: |
- fix
- feat
- docs
- ci
- chore
- BREAKING CHANGE
+ types:
+ - fix
+ - feat
+ - docs
+ - ci
+ - chore
+ - BREAKING CHANGE
# Configure that a scope must always be provided.
requireScope: false
# Configure additional validation for the subject based on a regex.
From 25c360fc93e609270beedc373d9e3f0081162c96 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 13 Dec 2021 07:55:15 -0500
Subject: [PATCH 81/83] chore: fixup rebase conflicts
---
.github/workflows/pr-title.yml | 16 ++++
.github/workflows/release.yml | 2 +-
.pre-commit-config.yaml | 22 +-----
.releaserc.json | 4 -
CHANGELOG.md | 9 +++
examples/bottlerocket/README.md | 75 ++++++++++++++++++
examples/bottlerocket/versions.tf | 26 +++++++
examples/complete/README.md | 20 ++---
examples/complete/versions.tf | 12 +--
examples/fargate/README.md | 69 ++++++++++++++++
examples/fargate/versions.tf | 26 +++++++
examples/fargate_profile/README.md | 23 ------
examples/fargate_profile/versions.tf | 20 -----
examples/instance_refresh/README.md | 82 ++++++++++++++++++++
examples/instance_refresh/versions.tf | 26 +++++++
examples/irsa/README.md | 42 ++--------
examples/irsa/versions.tf | 4 -
examples/irsa_autoscale_refresh/README.md | 40 ----------
examples/irsa_autoscale_refresh/versions.tf | 16 ----
examples/secrets_encryption/README.md | 40 ++--------
examples/secrets_encryption/versions.tf | 4 -
examples/self_managed_node_group/README.md | 14 ----
examples/self_managed_node_group/versions.tf | 20 -----
23 files changed, 356 insertions(+), 256 deletions(-)
create mode 100644 examples/bottlerocket/README.md
create mode 100644 examples/bottlerocket/versions.tf
create mode 100644 examples/fargate/README.md
create mode 100644 examples/fargate/versions.tf
create mode 100644 examples/instance_refresh/README.md
create mode 100644 examples/instance_refresh/versions.tf
diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml
index 4b16bf7a38..7619a31fd1 100644
--- a/.github/workflows/pr-title.yml
+++ b/.github/workflows/pr-title.yml
@@ -20,6 +20,10 @@ jobs:
with:
# Configure which types are allowed.
# Default: https://github.com/commitizen/conventional-commit-types
+<<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> 8d33a46 (chore: Updated allowed types for PR title (#1716))
types:
- fix
- feat
@@ -27,6 +31,18 @@ jobs:
- ci
- chore
- BREAKING CHANGE
+<<<<<<< HEAD
+=======
+ types: |
+ fix
+ feat
+ docs
+ ci
+ chore
+ BREAKING CHANGE
+>>>>>>> 3002be5 (chore: Added action to validate PR title (#1715))
+=======
+>>>>>>> 8d33a46 (chore: Updated allowed types for PR title (#1716))
# Configure that a scope must always be provided.
requireScope: false
# Configure additional validation for the subject based on a regex.
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 170b6bc033..d6f8cc23b9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -10,8 +10,8 @@ on:
<<<<<<< HEAD
- '**/*.tpl'
=======
- - '**/*.py'
>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
+ - '**/*.py'
- '**/*.tf'
jobs:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ed135ecb6f..006410918f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,22 +1,6 @@
repos:
- repo: https://github.com/antonbabenko/pre-commit-terraform
-<<<<<<< HEAD
-<<<<<<< HEAD
-<<<<<<< HEAD
-<<<<<<< HEAD
- rev: v1.57.0
-=======
- rev: v1.58.0
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
- rev: v1.59.0
->>>>>>> fb3eb35 (chore: remove karpenter, back to instance refresh and node termination handler)
-=======
- rev: v1.60.0
->>>>>>> 5195f78 (chore: update docs for new default)
-=======
- rev: v1.61.0
->>>>>>> 7664f78 (fix: update usage of launch template creation and references)
+ rev: v1.62.0
hooks:
- id: terraform_fmt
- id: terraform_validate
@@ -33,11 +17,7 @@ repos:
- '--args=--only=terraform_documented_variables'
- '--args=--only=terraform_typed_variables'
- '--args=--only=terraform_module_pinned_source'
-<<<<<<< HEAD
- '--args=--only=terraform_naming_convention'
-=======
- # - '--args=--only=terraform_naming_convention'
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
- '--args=--only=terraform_required_version'
- '--args=--only=terraform_required_providers'
- '--args=--only=terraform_standard_module_structure'
diff --git a/.releaserc.json b/.releaserc.json
index b5098ee20f..6e39031cf2 100644
--- a/.releaserc.json
+++ b/.releaserc.json
@@ -10,12 +10,8 @@
[
"@semantic-release/github",
{
-<<<<<<< HEAD
- "successComment": "This ${issue.pull_request ? 'PR is included' : 'issue has been resolved'} in version [${nextRelease.version}](${releaseInfos[0].url}) :tada:",
-=======
"successComment":
"This ${issue.pull_request ? 'PR is included' : 'issue has been resolved'} in version ${nextRelease.version} :tada:",
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
"labels": false,
"releasedLabels": false
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15937e80c2..84f17e52a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+<<<<<<< HEAD
+<<<<<<< HEAD
+=======
+>>>>>>> e90c877 (chore(release): version 17.24.0 [skip ci])
# Changelog
All notable changes to this project will be documented in this file.
@@ -16,6 +20,11 @@ All notable changes to this project will be documented in this file.
* Add ability to define custom timeout for fargate profiles ([#1614](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1614)) ([b7539dc](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b7539dc220f6b5fe199d67569b6f3619ec00fdf0))
* Removed ng_depends_on variable and related hack ([#1672](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1672)) ([56e93d7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/56e93d77de58f311f1d1d7051f40bf77e7b03524))
+<<<<<<< HEAD
+=======
+>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
+=======
+>>>>>>> e90c877 (chore(release): version 17.24.0 [skip ci])
## [v17.23.0] - 2021-11-02
FEATURES:
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
new file mode 100644
index 0000000000..88d9d4ee14
--- /dev/null
+++ b/examples/bottlerocket/README.md
@@ -0,0 +1,75 @@
+# AWS EKS cluster running Bottlerocket AMI
+
+Configuration in this directory creates an EKS cluster with a worker group running [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket).
+
+This is a minimal example which shows what knobs to turn to make Bottlerocket work.
+
+See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html) for more details.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.56 |
+| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
+| [local](#requirement\_local) | >= 1.4 |
+| [random](#requirement\_random) | >= 2.1 |
+| [tls](#requirement\_tls) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.56 |
+| [random](#provider\_random) | >= 2.1 |
+| [tls](#provider\_tls) | >= 2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks](#module\_eks) | ../.. | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_key_pair.nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
+| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
+| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
+| [node\_groups](#output\_node\_groups) | Outputs from node groups |
+
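A loose sketch of the knobs involved, under the v17-era worker group schema (key names such as `userdata_template_file` are assumed from that schema and may differ between module versions):

```hcl
data "aws_ami" "bottlerocket_ami" {
  most_recent = true
  owners      = ["amazon"]

  # Bottlerocket AMIs are published per Kubernetes version and architecture
  filter {
    name   = "name"
    values = ["bottlerocket-aws-k8s-1.21-x86_64-*"]
  }
}

module "eks" {
  source = "../.."

  cluster_name    = "bottlerocket-example" # placeholder
  cluster_version = "1.21"
  # ... VPC/subnet inputs elided ...

  worker_groups_launch_template = [
    {
      name                 = "bottlerocket-nodes"
      ami_id               = data.aws_ami.bottlerocket_ami.id
      instance_type        = "t3.small"
      asg_desired_capacity = 2

      # Bottlerocket consumes TOML user data instead of a shell script
      userdata_template_file = "${path.module}/userdata.toml" # key name assumed
    },
  ]
}
```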
diff --git a/examples/bottlerocket/versions.tf b/examples/bottlerocket/versions.tf
new file mode 100644
index 0000000000..cb5115c487
--- /dev/null
+++ b/examples/bottlerocket/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.4"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 1.11.1"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = ">= 2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/examples/complete/README.md b/examples/complete/README.md
index 8ab968f24b..e869b17be0 100644
--- a/examples/complete/README.md
+++ b/examples/complete/README.md
@@ -36,17 +36,13 @@ Note that this example may create resources which cost money. Run `terraform des
| [terraform](#requirement\_terraform) | >= 0.13.1 |
<<<<<<< HEAD
| [aws](#requirement\_aws) | >= 3.64 |
-<<<<<<< HEAD
-| [http](#requirement\_http) | >= 2.4.1 |
+| [null](#requirement\_null) | >= 3.0 |
=======
| [aws](#requirement\_aws) | >= 3.56 |
| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
| [local](#requirement\_local) | >= 1.4 |
| [random](#requirement\_random) | >= 2.1 |
>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
-| [null](#requirement\_null) | >= 3.0 |
->>>>>>> e831206 (feat: add additional resources, outputs for aws-auth configmap)
## Providers
@@ -54,14 +50,11 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
<<<<<<< HEAD
| [aws](#provider\_aws) | >= 3.64 |
-<<<<<<< HEAD
+| [null](#provider\_null) | >= 3.0 |
=======
| [aws](#provider\_aws) | >= 3.56 |
| [random](#provider\_random) | >= 2.1 |
>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
-| [null](#provider\_null) | >= 3.0 |
->>>>>>> e831206 (feat: add additional resources, outputs for aws-auth configmap)
## Modules
@@ -72,14 +65,15 @@ Note that this example may create resources which cost money. Run `terraform des
| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a |
| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
-=======
-| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | n/a |
-| [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | n/a |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
| [eks](#module\_eks) | ../.. | n/a |
| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a |
| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
+=======
+| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | n/a |
+| [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | n/a |
+| [eks](#module\_eks) | ../.. | n/a |
+>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf
index 142c367bee..ee9839d895 100644
--- a/examples/complete/versions.tf
+++ b/examples/complete/versions.tf
@@ -7,10 +7,9 @@ terraform {
<<<<<<< HEAD
version = ">= 3.64"
}
-<<<<<<< HEAD
- http = {
- source = "terraform-aws-modules/http"
- version = ">= 2.4.1"
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
=======
version = ">= 3.56"
}
@@ -26,11 +25,6 @@ terraform {
source = "hashicorp/random"
version = ">= 2.1"
>>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
- null = {
- source = "hashicorp/null"
- version = ">= 3.0"
->>>>>>> e831206 (feat: add additional resources, outputs for aws-auth configmap)
}
}
}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
new file mode 100644
index 0000000000..1228f5c4c1
--- /dev/null
+++ b/examples/fargate/README.md
@@ -0,0 +1,69 @@
+# AWS EKS Cluster with Fargate profiles
+
+Configuration in this directory creates an EKS cluster with Fargate profiles in two different ways:
+
+- Using the root module, where the EKS cluster and Fargate profiles are created together. This is the default behaviour for most users.
+- Using the `modules/fargate` submodule, where Fargate profiles are attached to an existing EKS cluster.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.56 |
+| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
+| [local](#requirement\_local) | >= 1.4 |
+| [random](#requirement\_random) | >= 2.1 |
+| [tls](#requirement\_tls) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.56 |
+| [random](#provider\_random) | >= 2.1 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks](#module\_eks) | ../.. | n/a |
+| [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
+| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | ARNs of the created Fargate profiles |
+| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
+
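A loose sketch of the second approach, attaching a profile to an existing cluster via the submodule (input names are assumed from this era of the module and may differ between versions):

```hcl
module "fargate_profile_existing_cluster" {
  source = "../../modules/fargate"

  cluster_name = "existing-cluster"                     # placeholder
  subnets      = ["subnet-abcde012", "subnet-bcde012a"] # placeholders

  fargate_profiles = {
    profile1 = {
      name = "profile1"
      selectors = [
        {
          namespace = "kube-system"
          labels = {
            k8s-app = "kube-dns"
          }
        }
      ]
    }
  }
}
```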
diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf
new file mode 100644
index 0000000000..cb5115c487
--- /dev/null
+++ b/examples/fargate/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.4"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 1.11.1"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = ">= 2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md
index 3f6c00116b..9ff7b03a4c 100644
--- a/examples/fargate_profile/README.md
+++ b/examples/fargate_profile/README.md
@@ -20,42 +20,19 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-<<<<<<< HEAD
| [aws](#requirement\_aws) | >= 3.64 |
## Providers
-<<<<<<< HEAD
-No providers.
-=======
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-| [tls](#requirement\_tls) | >= 2.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.64 |
->>>>>>> 43280e5 (refactor: splitting out user data to internal module for better testing/validation)
## Modules
| Name | Source | Version |
|------|--------|---------|
| [eks](#module\_eks) | ../.. | n/a |
-<<<<<<< HEAD
-=======
-| [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | n/a |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf
index 6387693545..bfce6ae345 100644
--- a/examples/fargate_profile/versions.tf
+++ b/examples/fargate_profile/versions.tf
@@ -4,27 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
-<<<<<<< HEAD
version = ">= 3.64"
-=======
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- tls = {
- source = "hashicorp/tls"
- version = ">= 2.0"
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
}
}
}
diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md
new file mode 100644
index 0000000000..cc558693ae
--- /dev/null
+++ b/examples/instance_refresh/README.md
@@ -0,0 +1,82 @@
+# Instance refresh example
+
+This is an EKS example using the [instance refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for worker groups.
+
+See [the official documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) for more details.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.56 |
+| [helm](#requirement\_helm) | >= 2.0 |
+| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
+| [local](#requirement\_local) | >= 1.4 |
+| [random](#requirement\_random) | >= 2.1 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.56 |
+| [helm](#provider\_helm) | >= 2.0 |
+| [random](#provider\_random) | >= 2.1 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | 4.1.0 |
+| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0.0 |
+| [eks](#module\_eks) | ../.. | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_autoscaling_lifecycle_hook.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource |
+| [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
+| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
+| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
+| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
+| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
+| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
+| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
+| [sqs\_queue\_asg\_notification\_arn](#output\_sqs\_queue\_asg\_notification\_arn) | SQS queue ASG notification ARN |
+| [sqs\_queue\_asg\_notification\_url](#output\_sqs\_queue\_asg\_notification\_url) | SQS queue ASG notification URL |
+
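A loose sketch of the worker group side of this wiring (key names such as `instance_refresh_enabled` and the node-termination-handler tag are assumed from this era of the module):

```hcl
module "eks" {
  source = "../.."

  # ... cluster and VPC inputs elided ...

  worker_groups_launch_template = [
    {
      name                     = "refresh"
      asg_max_size             = 3
      asg_desired_capacity     = 2
      instance_refresh_enabled = true # opt the ASG into instance refresh (key name assumed)

      # Tag consumed by aws-node-termination-handler in queue-processor mode
      tags = [{
        key                 = "aws-node-termination-handler/managed"
        value               = ""
        propagate_at_launch = true
      }]
    },
  ]
}
```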
diff --git a/examples/instance_refresh/versions.tf b/examples/instance_refresh/versions.tf
new file mode 100644
index 0000000000..67c2d66a82
--- /dev/null
+++ b/examples/instance_refresh/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+ required_version = ">= 0.13.1"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 3.56"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.4"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 1.11.1"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = ">= 2.1"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
index d4dd0f0eac..137f3d63d8 100644
--- a/examples/irsa/README.md
+++ b/examples/irsa/README.md
@@ -22,29 +22,19 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-<<<<<<< HEAD
-| [aws](#requirement\_aws) | >= 3.64 |
-| [helm](#requirement\_helm) | >= 2.0 |
-=======
| [aws](#requirement\_aws) | >= 3.56 |
| [helm](#requirement\_helm) | >= 2.0 |
| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
| [local](#requirement\_local) | >= 1.4 |
| [random](#requirement\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
## Providers
| Name | Version |
|------|---------|
-<<<<<<< HEAD
-| [aws](#provider\_aws) | >= 3.64 |
-| [helm](#provider\_helm) | >= 2.0 |
-=======
| [aws](#provider\_aws) | >= 3.56 |
| [helm](#provider\_helm) | >= 2.0 |
| [random](#provider\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
## Modules
@@ -59,9 +49,14 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
## Inputs
@@ -71,28 +66,5 @@ No inputs.
| Name | Description |
|------|-------------|
-| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
-| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
-| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
-| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
-| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
-| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
-| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
-| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
-| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
-| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
-| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
-| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
-| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
-| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
-| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
-| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
-| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
-| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
-| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| [aws\_account\_id](#output\_aws\_account\_id) | IAM AWS account id |
diff --git a/examples/irsa/versions.tf b/examples/irsa/versions.tf
index 6d593dd4b9..67c2d66a82 100644
--- a/examples/irsa/versions.tf
+++ b/examples/irsa/versions.tf
@@ -4,9 +4,6 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
-<<<<<<< HEAD
- version = ">= 3.64"
-=======
version = ">= 3.56"
}
local = {
@@ -20,7 +17,6 @@ terraform {
random = {
source = "hashicorp/random"
version = ">= 2.1"
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
}
helm = {
source = "hashicorp/helm"
diff --git a/examples/irsa_autoscale_refresh/README.md b/examples/irsa_autoscale_refresh/README.md
index ba03602b0d..a21f31f6d3 100644
--- a/examples/irsa_autoscale_refresh/README.md
+++ b/examples/irsa_autoscale_refresh/README.md
@@ -24,64 +24,24 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-<<<<<<< HEAD
| [aws](#requirement\_aws) | >= 3.64 |
| [helm](#requirement\_helm) | >= 2.0 |
-<<<<<<< HEAD:examples/karpenter/README.md
-=======
-| [aws](#requirement\_aws) | >= 3.56 |
-| [helm](#requirement\_helm) | >= 2.0 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
| [null](#requirement\_null) | >= 3.0 |
->>>>>>> fb3eb35 (chore: remove karpenter, back to instance refresh and node termination handler):examples/irsa_autoscale_refresh/README.md
## Providers
| Name | Version |
|------|---------|
-<<<<<<< HEAD
| [aws](#provider\_aws) | >= 3.64 |
| [helm](#provider\_helm) | >= 2.0 |
-<<<<<<< HEAD:examples/karpenter/README.md
-<<<<<<< HEAD:examples/custom/README.md
-<<<<<<< HEAD:examples/instance_refresh/README.md
-=======
-| [aws](#provider\_aws) | >= 3.56 |
-| [helm](#provider\_helm) | >= 2.0 |
-| [random](#provider\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
-| [null](#provider\_null) | n/a |
->>>>>>> bc35987 (chore: updating custom example):examples/custom/README.md
-=======
->>>>>>> 84f3af3 (chore: ugh, just work already):examples/karpenter/README.md
-=======
| [null](#provider\_null) | >= 3.0 |
->>>>>>> fb3eb35 (chore: remove karpenter, back to instance refresh and node termination handler):examples/irsa_autoscale_refresh/README.md
## Modules
| Name | Source | Version |
|------|--------|---------|
-<<<<<<< HEAD:examples/karpenter/README.md
-<<<<<<< HEAD:examples/instance_refresh/README.md
-<<<<<<< HEAD
-| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
-| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
-=======
-| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | 4.1.0 |
-| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0.0 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
-=======
->>>>>>> bc35987 (chore: updating custom example):examples/custom/README.md
-=======
| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
->>>>>>> fb3eb35 (chore: remove karpenter, back to instance refresh and node termination handler):examples/irsa_autoscale_refresh/README.md
| [eks](#module\_eks) | ../.. | n/a |
| [iam\_assumable\_role\_cluster\_autoscaler](#module\_iam\_assumable\_role\_cluster\_autoscaler) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
diff --git a/examples/irsa_autoscale_refresh/versions.tf b/examples/irsa_autoscale_refresh/versions.tf
index 5229f4454e..4706dec92a 100644
--- a/examples/irsa_autoscale_refresh/versions.tf
+++ b/examples/irsa_autoscale_refresh/versions.tf
@@ -4,23 +4,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
-<<<<<<< HEAD
version = ">= 3.64"
-=======
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
}
null = {
source = "hashicorp/null"
diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md
index 133b4316c7..f5f38b0498 100644
--- a/examples/secrets_encryption/README.md
+++ b/examples/secrets_encryption/README.md
@@ -22,25 +22,17 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-<<<<<<< HEAD
-| [aws](#requirement\_aws) | >= 3.64 |
-=======
| [aws](#requirement\_aws) | >= 3.56 |
| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
| [local](#requirement\_local) | >= 1.4 |
| [random](#requirement\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
## Providers
| Name | Version |
|------|---------|
-<<<<<<< HEAD
-| [aws](#provider\_aws) | >= 3.64 |
-=======
| [aws](#provider\_aws) | >= 3.56 |
| [random](#provider\_random) | >= 2.1 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
## Modules
@@ -54,6 +46,10 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
+| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
@@ -63,28 +59,8 @@ No inputs.
| Name | Description |
|------|-------------|
-| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
-| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
-| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
-| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
-| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
-| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
-| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
-| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
-| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
-| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
-| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
-| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
-| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
-| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
-| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
-| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
-| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
-| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
-| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
+| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf
index 062b8f1b5e..8e2b837984 100644
--- a/examples/secrets_encryption/versions.tf
+++ b/examples/secrets_encryption/versions.tf
@@ -4,9 +4,6 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
-<<<<<<< HEAD
- version = ">= 3.64"
-=======
version = ">= 3.56"
}
local = {
@@ -20,7 +17,6 @@ terraform {
random = {
source = "hashicorp/random"
version = ">= 2.1"
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
}
}
}
diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md
index b9c37150d5..ff13d8148a 100644
--- a/examples/self_managed_node_group/README.md
+++ b/examples/self_managed_node_group/README.md
@@ -26,31 +26,17 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 0.13.1 |
-<<<<<<< HEAD
| [aws](#requirement\_aws) | >= 3.64 |
| [null](#requirement\_null) | >= 3.0 |
| [tls](#requirement\_tls) | >= 2.2 |
-=======
-| [aws](#requirement\_aws) | >= 3.56 |
-| [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| [local](#requirement\_local) | >= 1.4 |
-| [random](#requirement\_random) | >= 2.1 |
-| [tls](#requirement\_tls) | >= 2.0 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
## Providers
| Name | Version |
|------|---------|
-<<<<<<< HEAD
| [aws](#provider\_aws) | >= 3.64 |
| [null](#provider\_null) | >= 3.0 |
| [tls](#provider\_tls) | >= 2.2 |
-=======
-| [aws](#provider\_aws) | >= 3.56 |
-| [random](#provider\_random) | >= 2.1 |
-| [tls](#provider\_tls) | >= 2.0 |
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
## Modules
diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf
index 48001da51b..883963f7b0 100644
--- a/examples/self_managed_node_group/versions.tf
+++ b/examples/self_managed_node_group/versions.tf
@@ -4,7 +4,6 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
-<<<<<<< HEAD
version = ">= 3.64"
}
null = {
@@ -14,25 +13,6 @@ terraform {
tls = {
source = "hashicorp/tls"
version = ">= 2.2"
-=======
- version = ">= 3.56"
- }
- local = {
- source = "hashicorp/local"
- version = ">= 1.4"
- }
- kubernetes = {
- source = "hashicorp/kubernetes"
- version = ">= 1.11.1"
- }
- random = {
- source = "hashicorp/random"
- version = ">= 2.1"
- }
- tls = {
- source = "hashicorp/tls"
- version = ">= 2.0"
->>>>>>> b876ff9 (fix: update CI/CD process to enable auto-release workflow (#1698))
}
}
}
From 0c2a270abc4599639743ee888da4310b982250fc Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Mon, 13 Dec 2021 10:37:20 -0500
Subject: [PATCH 82/83] fix: propagate service IPV4 CIDR down to AWS EKS
optimized AMI user data to pull correct cluster DNS IP via bootstrap script
---
examples/eks_managed_node_group/main.tf | 1 +
examples/user_data/main.tf | 12 +++++++-----
modules/_user_data/README.md | 1 +
modules/_user_data/main.tf | 9 ++++++---
modules/_user_data/variables.tf | 6 ++++++
modules/eks-managed-node-group/README.md | 1 +
modules/eks-managed-node-group/main.tf | 2 ++
modules/eks-managed-node-group/variables.tf | 6 ++++++
node_groups.tf | 9 +++++----
templates/linux_user_data.tpl | 3 +++
10 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 83db0fe3a9..fb801c1eb1 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -25,6 +25,7 @@ module "eks" {
cluster_name = local.name
cluster_version = local.cluster_version
+ cluster_service_ipv4_cidr = "172.16.0.0/16"
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf
index 9c572b42ab..4e961a3947 100644
--- a/examples/user_data/main.tf
+++ b/examples/user_data/main.tf
@@ -1,8 +1,9 @@
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
- cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+ cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+ cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+ cluster_service_ipv4_cidr = "172.16.0.0/16"
}
################################################################################
@@ -32,9 +33,10 @@ module "eks_mng_linux_additional" {
module "eks_mng_linux_custom_ami" {
source = "../../modules/_user_data"
- cluster_name = local.name
- cluster_endpoint = local.cluster_endpoint
- cluster_auth_base64 = local.cluster_auth_base64
+ cluster_name = local.name
+ cluster_endpoint = local.cluster_endpoint
+ cluster_auth_base64 = local.cluster_auth_base64
+ cluster_service_ipv4_cidr = local.cluster_service_ipv4_cidr
enable_bootstrap_user_data = true
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
index 995071c155..f61500517e 100644
--- a/modules/_user_data/README.md
+++ b/modules/_user_data/README.md
@@ -106,6 +106,7 @@ No modules.
| [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
| [create](#input\_create) | Determines whether to create user-data or not | `bool` | `true` | no |
| [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
| [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not | `bool` | `true` | no |
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
index b5adda2294..1d265bdb54 100644
--- a/modules/_user_data/main.tf
+++ b/modules/_user_data/main.tf
@@ -10,9 +10,10 @@ locals {
cluster_endpoint = var.cluster_endpoint
cluster_auth_base64 = var.cluster_auth_base64
# Optional
- bootstrap_extra_args = var.bootstrap_extra_args
- pre_bootstrap_user_data = var.pre_bootstrap_user_data
- post_bootstrap_user_data = var.post_bootstrap_user_data
+ cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr != null ? var.cluster_service_ipv4_cidr : ""
+ bootstrap_extra_args = var.bootstrap_extra_args
+ pre_bootstrap_user_data = var.pre_bootstrap_user_data
+ post_bootstrap_user_data = var.post_bootstrap_user_data
}
)) : ""
platform = {
@@ -27,6 +28,7 @@ locals {
cluster_endpoint = var.cluster_endpoint
cluster_auth_base64 = var.cluster_auth_base64
# Optional - is appended if using EKS managed node group without custom AMI
+ # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/bottlerocket-os/bottlerocket/issues/1866
bootstrap_extra_args = var.bootstrap_extra_args
}
)) : ""
@@ -44,6 +46,7 @@ locals {
cluster_endpoint = var.cluster_endpoint
cluster_auth_base64 = var.cluster_auth_base64
# Optional - is appended if using EKS managed node group without custom AMI
+ # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/awslabs/amazon-eks-ami/issues/805
bootstrap_extra_args = var.bootstrap_extra_args
pre_bootstrap_user_data = var.pre_bootstrap_user_data
post_bootstrap_user_data = var.post_bootstrap_user_data
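On the template side, the propagated value presumably ends up as a flag on the EKS bootstrap script; a hedged sketch of what `templates/linux_user_data.tpl` might render (the `--service-ipv4-cidr` flag exists in the amazon-eks-ami bootstrap script, but the guard syntax here is assumed, not copied from the module):

```sh
#!/bin/bash
set -ex
# Pass the service CIDR through so bootstrap.sh derives the correct DNS cluster IP
/etc/eks/bootstrap.sh ${cluster_name} \
%{ if cluster_service_ipv4_cidr != "" ~}
  --service-ipv4-cidr ${cluster_service_ipv4_cidr} \
%{ endif ~}
  --b64-cluster-ca ${cluster_auth_base64} \
  --apiserver-endpoint ${cluster_endpoint} \
  ${bootstrap_extra_args}
```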
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
index f819580eee..232e1e883e 100644
--- a/modules/_user_data/variables.tf
+++ b/modules/_user_data/variables.tf
@@ -40,6 +40,12 @@ variable "cluster_auth_base64" {
default = ""
}
+variable "cluster_service_ipv4_cidr" {
+ description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks"
+ type = string
+ default = null
+}
+
variable "pre_bootstrap_user_data" {
description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index f339fcc94a..43e4a72bc3 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -92,6 +92,7 @@ module "eks_managed_node_group" {
| [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no |
| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no |
+| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
| [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `null` | no |
| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index e93727f82c..e70e93a239 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -14,6 +14,8 @@ module "user_data" {
cluster_endpoint = var.cluster_endpoint
cluster_auth_base64 = var.cluster_auth_base64
+ cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr
+
enable_bootstrap_user_data = var.enable_bootstrap_user_data
pre_bootstrap_user_data = var.pre_bootstrap_user_data
post_bootstrap_user_data = var.post_bootstrap_user_data
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 360071cc05..38d126f269 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -44,6 +44,12 @@ variable "cluster_auth_base64" {
default = ""
}
+variable "cluster_service_ipv4_cidr" {
+ description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks"
+ type = string
+ default = null
+}
+
variable "pre_bootstrap_user_data" {
description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
type = string
diff --git a/node_groups.tf b/node_groups.tf
index 00d7573fab..771fa60bf8 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -212,8 +212,9 @@ module "eks_managed_node_group" {
# User data
platform = try(each.value.platform, var.eks_managed_node_group_defaults.platform, "linux")
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.eks_managed_node_group_defaults.cluster_endpoint, "")
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.eks_managed_node_group_defaults.cluster_auth_base64, "")
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+ cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr
enable_bootstrap_user_data = try(each.value.enable_bootstrap_user_data, var.eks_managed_node_group_defaults.enable_bootstrap_user_data, false)
pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "")
post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.eks_managed_node_group_defaults.post_bootstrap_user_data, "")
@@ -327,8 +328,8 @@ module "self_managed_node_group" {
# User data
platform = try(each.value.platform, var.self_managed_node_group_defaults.platform, "linux")
- cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, var.self_managed_node_group_defaults.cluster_endpoint, "")
- cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, var.self_managed_node_group_defaults.cluster_auth_base64, "")
+ cluster_endpoint = try(aws_eks_cluster.this[0].endpoint, "")
+ cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_managed_node_group_defaults.pre_bootstrap_user_data, "")
post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_managed_node_group_defaults.post_bootstrap_user_data, "")
bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "")
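The `try()` chains above resolve a per-node-group value first, then the module-level defaults map, then a hard-coded fallback. A tiny illustrative snippet of that precedence (the local names here are hypothetical):

```hcl
locals {
  group    = { platform = "bottlerocket" } # per-node-group definition
  defaults = { platform = "linux" }        # e.g. var.self_managed_node_group_defaults
  # try() returns the first expression that evaluates without an error
  platform = try(local.group.platform, local.defaults.platform, "linux") # => "bottlerocket"
}
```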
diff --git a/templates/linux_user_data.tpl b/templates/linux_user_data.tpl
index d6f367984e..14acbd2aff 100644
--- a/templates/linux_user_data.tpl
+++ b/templates/linux_user_data.tpl
@@ -3,6 +3,9 @@
set -e
%{ endif ~}
${pre_bootstrap_user_data ~}
+%{ if length(cluster_service_ipv4_cidr) > 0 ~}
+export SERVICE_IPV4_CIDR=${cluster_service_ipv4_cidr}
+%{ endif ~}
%{ if enable_bootstrap_user_data ~}
B64_CLUSTER_CA=${cluster_auth_base64}
API_SERVER_URL=${cluster_endpoint}
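End to end, a root module user could opt in as sketched below (name, version, and CIDR are illustrative); when the variable is set, the template above exports `SERVICE_IPV4_CIDR` ahead of the bootstrap call so the EKS-optimized AMI's bootstrap script can pick it up where supported:

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # assumed registry source

  cluster_name    = "example" # illustrative
  cluster_version = "1.21"    # illustrative

  # Propagated through the node group submodules into the user data templates
  cluster_service_ipv4_cidr = "172.16.0.0/16"
}
```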
From d2fa4d2d7aa80a8c34c5396e9786ea39c14cd6fc Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Thu, 16 Dec 2021 13:57:11 -0500
Subject: [PATCH 83/83] chore: update naming on fargate example to avoid DNS
confusion (get it ;)
---
examples/fargate_profile/main.tf | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf
index 13e3e4b7b0..61fbb43671 100644
--- a/examples/fargate_profile/main.tf
+++ b/examples/fargate_profile/main.tf
@@ -27,6 +27,7 @@ module "eks" {
cluster_endpoint_public_access = true
cluster_addons = {
+ # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
coredns = {
resolve_conflicts = "OVERWRITE"
}
@@ -70,9 +71,9 @@ module "eks" {
name = "default"
selectors = [
{
- namespace = "kube-system"
+ namespace = "backend"
labels = {
- k8s-app = "kube-dns"
+ Application = "backend"
}
},
{