diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md
index 574be73214..b7443cb326 100644
--- a/modules/node_groups/README.md
+++ b/modules/node_groups/README.md
@@ -23,6 +23,7 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In
| capacity\_type | Type of instance capacity to provision. Options are `ON_DEMAND` and `SPOT` | string | Provider default behavior |
| desired\_capacity | Desired number of workers | number | `var.workers_group_defaults[asg_desired_capacity]` |
| disk\_size | Workers' disk size | number | Provider default behavior |
+| disk\_type | Workers' disk type. Requires `create_launch_template` to be `true` | string | `gp3` |
| iam\_role\_arn | IAM role ARN for workers | string | `var.default_iam_role_arn` |
| instance\_types | Node group's instance type(s). Multiple types can be specified when `capacity_type="SPOT"`. | list | `[var.workers_group_defaults[instance_type]]` |
| k8s\_labels | Kubernetes labels | map(string) | No labels applied |
@@ -35,6 +36,12 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In
| source\_security\_group\_ids | Source security groups for remote access to workers | list(string) | If key\_name is specified: THE REMOTE ACCESS WILL BE OPENED TO THE WORLD |
| subnets | Subnets to contain workers | list(string) | `var.workers_group_defaults[subnets]` |
| version | Kubernetes version | string | Provider default behavior |
+| create\_launch\_template | Create and use a default launch template | bool | `false` |
+| kubelet\_extra\_args | This string is passed directly to kubelet if set. Useful for adding labels or taints. Requires `create_launch_template` to be `true` | string | "" |
+| enable\_monitoring | Enables/disables detailed monitoring. Requires `create_launch_template` to be `true` | bool | `true` |
+| eni\_delete | Delete the Elastic Network Interface (ENI) on termination (if set to `false`, you will have to delete it manually before destroying) | bool | `true` |
+| public\_ip | Associate a public IP address with a worker. Requires `create_launch_template` to be `true` | bool | `false` |
+| pre\_userdata | Userdata to prepend to the default userdata. Requires `create_launch_template` to be `true` | string | "" |
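+
+For example, a node group that opts into the module-managed launch template could be configured like this (a minimal sketch with illustrative values only; the keys are the ones documented above):
+
+```hcl
+node_groups = {
+  example = {
+    # Illustrative values only
+    desired_capacity = 1
+    max_capacity     = 3
+    min_capacity     = 1
+    instance_types   = ["m5.large"]
+
+    create_launch_template = true
+    disk_size              = 50
+    disk_type              = "gp3"
+    enable_monitoring      = true
+    public_ip              = false
+    kubelet_extra_args     = "--node-labels=workload=general"
+    pre_userdata           = "yum install -y amazon-ssm-agent"
+  }
+}
+```
+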
## Requirements
@@ -50,6 +57,7 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In
| Name | Version |
|------|---------|
| [aws](#provider\_aws) | >= 3.22.0 |
+| [cloudinit](#provider\_cloudinit) | n/a |
| [random](#provider\_random) | >= 2.1 |
## Modules
@@ -61,7 +69,9 @@ No modules.
| Name | Type |
|------|------|
| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
+| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [random_pet.node_groups](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
+| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
## Inputs
@@ -74,6 +84,8 @@ No modules.
| [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | n/a | yes |
| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | n/a | yes |
+| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
+| [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | n/a | yes |
## Outputs
diff --git a/modules/node_groups/launchtemplate.tf b/modules/node_groups/launchtemplate.tf
new file mode 100644
index 0000000000..1da04e2037
--- /dev/null
+++ b/modules/node_groups/launchtemplate.tf
@@ -0,0 +1,111 @@
+data "cloudinit_config" "workers_userdata" {
+ for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
+ gzip = false
+ base64_encode = true
+ boundary = "//"
+
+ part {
+ content_type = "text/x-shellscript"
+ content = templatefile("${path.module}/templates/userdata.sh.tpl",
+ {
+ pre_userdata = each.value["pre_userdata"]
+ kubelet_extra_args = each.value["kubelet_extra_args"]
+ }
+ )
+
+ }
+}
+
+# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
+# There are several more options one could set, but you probably don't need to modify them.
+# You can take the default and add your custom AMI and/or custom tags.
+#
+# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy for the node group. If you DON'T use a custom AMI,
+# the default user-data for bootstrapping the cluster is merged into that copy.
+resource "aws_launch_template" "workers" {
+ for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
+ name_prefix = lookup(each.value, "name", join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id]))
+ description = lookup(each.value, "name", join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id]))
+ update_default_version = true
+
+ block_device_mappings {
+ device_name = "/dev/xvda"
+
+ ebs {
+ volume_size = lookup(each.value, "disk_size", null)
+ volume_type = lookup(each.value, "disk_type", null)
+ delete_on_termination = true
+ }
+ }
+
+ instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
+
+ monitoring {
+ enabled = lookup(each.value, "enable_monitoring", null)
+ }
+
+ network_interfaces {
+ associate_public_ip_address = lookup(each.value, "public_ip", null)
+ delete_on_termination = lookup(each.value, "eni_delete", null)
+ security_groups = flatten([
+ var.worker_security_group_id,
+ var.worker_additional_security_group_ids,
+ lookup(
+ each.value,
+ "additional_security_group_ids",
+ null,
+ ),
+ ])
+ }
+
+ # if you want to use a custom AMI
+ # image_id = var.ami_id
+
+ # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
+ # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
+ #
+ # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
+
+ user_data = data.cloudinit_config.workers_userdata[each.key].rendered
+
+ key_name = lookup(each.value, "key_name", null)
+
+ # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
+ tag_specifications {
+ resource_type = "instance"
+
+ tags = merge(
+ var.tags,
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {}),
+ {
+ Name = lookup(each.value, "name", join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id]))
+ }
+ )
+ }
+
+ # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
+ tag_specifications {
+ resource_type = "volume"
+
+ tags = merge(
+ var.tags,
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {}),
+ {
+ Name = lookup(each.value, "name", join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id]))
+ }
+ )
+ }
+
+ # Tag the LT itself
+ tags = merge(
+ var.tags,
+ lookup(var.node_groups_defaults, "additional_tags", {}),
+ lookup(var.node_groups[each.key], "additional_tags", {}),
+ )
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
index 61c633d5db..3c510d70ab 100644
--- a/modules/node_groups/locals.tf
+++ b/modules/node_groups/locals.tf
@@ -2,15 +2,25 @@ locals {
# Merge defaults and per-group values to make code cleaner
node_groups_expanded = { for k, v in var.node_groups : k => merge(
{
- desired_capacity = var.workers_group_defaults["asg_desired_capacity"]
- iam_role_arn = var.default_iam_role_arn
- instance_types = [var.workers_group_defaults["instance_type"]]
- key_name = var.workers_group_defaults["key_name"]
- launch_template_id = var.workers_group_defaults["launch_template_id"]
- launch_template_version = var.workers_group_defaults["launch_template_version"]
- max_capacity = var.workers_group_defaults["asg_max_size"]
- min_capacity = var.workers_group_defaults["asg_min_size"]
- subnets = var.workers_group_defaults["subnets"]
+ desired_capacity = var.workers_group_defaults["asg_desired_capacity"]
+ iam_role_arn = var.default_iam_role_arn
+ instance_types = [var.workers_group_defaults["instance_type"]]
+ key_name = var.workers_group_defaults["key_name"]
+ launch_template_id = var.workers_group_defaults["launch_template_id"]
+ launch_template_version = var.workers_group_defaults["launch_template_version"]
+ set_instance_types_on_lt = false
+ max_capacity = var.workers_group_defaults["asg_max_size"]
+ min_capacity = var.workers_group_defaults["asg_min_size"]
+ subnets = var.workers_group_defaults["subnets"]
+ create_launch_template = false
+ kubelet_extra_args = var.workers_group_defaults["kubelet_extra_args"]
+ disk_size = var.workers_group_defaults["root_volume_size"]
+ disk_type = var.workers_group_defaults["root_volume_type"]
+ enable_monitoring = var.workers_group_defaults["enable_monitoring"]
+ eni_delete = var.workers_group_defaults["eni_delete"]
+ public_ip = var.workers_group_defaults["public_ip"]
+ pre_userdata = var.workers_group_defaults["pre_userdata"]
+ additional_security_group_ids = var.workers_group_defaults["additional_security_group_ids"]
},
var.node_groups_defaults,
v,
diff --git a/modules/node_groups/node_groups.tf b/modules/node_groups/node_groups.tf
index 68abb094d9..8b9904cb17 100644
--- a/modules/node_groups/node_groups.tf
+++ b/modules/node_groups/node_groups.tf
@@ -14,13 +14,13 @@ resource "aws_eks_node_group" "workers" {
}
ami_type = lookup(each.value, "ami_type", null)
- disk_size = lookup(each.value, "disk_size", null)
- instance_types = lookup(each.value, "instance_types", null)
+  disk_size      = each.value["launch_template_id"] != null || each.value["create_launch_template"] ? null : lookup(each.value, "disk_size", null)
+  instance_types = !each.value["set_instance_types_on_lt"] ? each.value["instance_types"] : null
release_version = lookup(each.value, "ami_release_version", null)
capacity_type = lookup(each.value, "capacity_type", null)
dynamic "remote_access" {
- for_each = each.value["key_name"] != "" ? [{
+ for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
ec2_ssh_key = each.value["key_name"]
source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
}] : []
@@ -43,6 +43,18 @@ resource "aws_eks_node_group" "workers" {
}
}
+ dynamic "launch_template" {
+ for_each = each.value["launch_template_id"] == null && each.value["create_launch_template"] ? [{
+ id = aws_launch_template.workers[each.key].id
+ version = aws_launch_template.workers[each.key].latest_version
+ }] : []
+
+ content {
+ id = launch_template.value["id"]
+ version = launch_template.value["version"]
+ }
+ }
+
version = lookup(each.value, "version", null)
labels = merge(
diff --git a/modules/node_groups/templates/userdata.sh.tpl b/modules/node_groups/templates/userdata.sh.tpl
new file mode 100644
index 0000000000..3aecd0aabb
--- /dev/null
+++ b/modules/node_groups/templates/userdata.sh.tpl
@@ -0,0 +1,6 @@
+#!/bin/bash -e
+
+# Allow user supplied pre userdata code
+${pre_userdata}
+
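+# Patch the EKS bootstrap script so the user-supplied flags are appended to
+# KUBELET_EXTRA_ARGS when the node bootstraps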
+sed -i '/^KUBELET_EXTRA_ARGS=/a KUBELET_EXTRA_ARGS+=" ${kubelet_extra_args}"' /etc/eks/bootstrap.sh
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
index fc869d9d99..585beb5f91 100644
--- a/modules/node_groups/variables.tf
+++ b/modules/node_groups/variables.tf
@@ -19,6 +19,18 @@ variable "workers_group_defaults" {
type = any
}
+variable "worker_security_group_id" {
+ description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
+ type = string
+ default = ""
+}
+
+variable "worker_additional_security_group_ids" {
+ description = "A list of additional security group ids to attach to worker instances"
+ type = list(string)
+ default = []
+}
+
variable "tags" {
description = "A map of tags to add to all resources"
type = map(string)
diff --git a/node_groups.tf b/node_groups.tf
index 6721f51aa2..d98979310f 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -1,12 +1,14 @@
module "node_groups" {
- source = "./modules/node_groups"
- create_eks = var.create_eks
- cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
- default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
- workers_group_defaults = local.workers_group_defaults
- tags = var.tags
- node_groups_defaults = var.node_groups_defaults
- node_groups = var.node_groups
+ source = "./modules/node_groups"
+ create_eks = var.create_eks
+ cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
+ default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
+ workers_group_defaults = local.workers_group_defaults
+ worker_security_group_id = local.worker_security_group_id
+ worker_additional_security_group_ids = var.worker_additional_security_group_ids
+ tags = var.tags
+ node_groups_defaults = var.node_groups_defaults
+ node_groups = var.node_groups
# Hack to ensure ordering of resource creation.
# This is a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2