From 862fd6192a1fe63323f57ee87f4d3a37b8ad5df7 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 09:05:37 -0400 Subject: [PATCH 01/33] refactor: Change default NTP CIDR blocks to use those provided by Amazon --- README.md | 4 ++-- variables.tf | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c82ffd5fe0..42a142e541 100644 --- a/README.md +++ b/README.md @@ -336,8 +336,8 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no | | [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no | | [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no | -| [node\_security\_group\_ntp\_ipv4\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv4\_cidr\_block) | IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["169.254.169.123/32"]` | `list(string)` |
[
"0.0.0.0/0"
]
| no | -| [node\_security\_group\_ntp\_ipv6\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv6\_cidr\_block) | IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["fd00:ec2::123/128"]` | `list(string)` |
[
"::/0"
]
| no | +| [node\_security\_group\_ntp\_ipv4\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv4\_cidr\_block) | IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["169.254.169.123/32"]` | `list(string)` |
[
"169.254.169.123/32"
]
| no | +| [node\_security\_group\_ntp\_ipv6\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv6\_cidr\_block) | IPv6 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["fd00:ec2::123/128"]` | `list(string)` |
[
"fd00:ec2::123/128"
]
| no | | [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no | | [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `bool` | `true` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | diff --git a/variables.tf b/variables.tf index df2ee51638..3807618e40 100644 --- a/variables.tf +++ b/variables.tf @@ -322,18 +322,16 @@ variable "node_security_group_tags" { default = {} } -# TODO - at next breaking change, make 169.254.169.123/32 the default variable "node_security_group_ntp_ipv4_cidr_block" { description = "IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"169.254.169.123/32\"]`" type = list(string) - default = ["0.0.0.0/0"] + default = ["169.254.169.123/32"] } -# TODO - at next breaking change, make fd00:ec2::123/128 the default variable "node_security_group_ntp_ipv6_cidr_block" { description = "IPv4 CIDR block to allow NTP egress. 
Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"fd00:ec2::123/128\"]`" type = list(string) - default = ["::/0"] + default = ["fd00:ec2::123/128"] } ################################################################################ From 136d7ed0611ba31a8b431765193ba5d1a484b056 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 09:14:56 -0400 Subject: [PATCH 02/33] feat: Add support for specifying `most_recent` and `preserve` arguments on addons; raise minimum requirements to support --- README.md | 7 ++++--- examples/complete/README.md | 6 +++--- examples/complete/versions.tf | 4 ++-- examples/eks_managed_node_group/README.md | 6 +++--- examples/eks_managed_node_group/versions.tf | 4 ++-- examples/fargate_profile/README.md | 6 +++--- examples/fargate_profile/versions.tf | 4 ++-- examples/karpenter/README.md | 6 +++--- examples/karpenter/versions.tf | 4 ++-- examples/self_managed_node_group/README.md | 6 +++--- examples/self_managed_node_group/versions.tf | 4 ++-- examples/user_data/README.md | 4 ++-- examples/user_data/versions.tf | 4 ++-- main.tf | 15 ++++++++++++--- modules/_user_data/README.md | 2 +- modules/_user_data/versions.tf | 2 +- modules/eks-managed-node-group/README.md | 6 +++--- modules/eks-managed-node-group/versions.tf | 4 ++-- modules/fargate-profile/README.md | 6 +++--- modules/fargate-profile/versions.tf | 4 ++-- modules/self-managed-node-group/README.md | 6 +++--- modules/self-managed-node-group/versions.tf | 4 ++-- versions.tf | 4 ++-- 23 files changed, 64 insertions(+), 54 deletions(-) diff --git a/README.md b/README.md index 42a142e541..db0c24aef9 100644 --- a/README.md +++ b/README.md @@ -207,8 +207,8 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -216,7 +216,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | | [kubernetes](#provider\_kubernetes) | >= 2.10 | | [tls](#provider\_tls) | >= 3.0 | @@ -252,6 +252,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_default_tags.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | +| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | diff --git a/examples/complete/README.md b/examples/complete/README.md index 6961891591..53ad154790 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -33,15 +33,15 @@ Note that this example may create resources 
which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | ## Modules diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 6d6dc45be6..248218ac69 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 9014bc660b..03f3c91020 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -57,8 +57,8 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -66,7 +66,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | | [tls](#provider\_tls) | >= 3.0 | ## Modules diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index fde7af0f23..7b9aed5fa9 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } tls = { source = "hashicorp/tls" diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index d8a1bfecf3..78d868ff9d 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -19,15 +19,15 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | ## Modules diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index 6d6dc45be6..248218ac69 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index 761299aa54..98f31ad110 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -51,8 +51,8 
@@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | | [helm](#requirement\_helm) | >= 2.4 | | [kubectl](#requirement\_kubectl) | >= 1.14 | @@ -60,7 +60,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | | [helm](#provider\_helm) | >= 2.4 | | [kubectl](#provider\_kubectl) | >= 1.14 | diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf index fe18abab81..ca6b5e11bf 100644 --- a/examples/karpenter/versions.tf +++ b/examples/karpenter/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } helm = { source = "hashicorp/helm" diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index a543d6454b..47353b66a9 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -25,8 +25,8 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -34,7 +34,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | | [tls](#provider\_tls) | >= 3.0 | ## Modules diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index fde7af0f23..7b9aed5fa9 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } tls = { source = "hashicorp/tls" diff --git a/examples/user_data/README.md b/examples/user_data/README.md index 54cd9ec72b..7f2c5de23f 100644 --- a/examples/user_data/README.md +++ b/examples/user_data/README.md @@ -17,8 +17,8 @@ $ terraform apply | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | ## Providers diff --git a/examples/user_data/versions.tf b/examples/user_data/versions.tf index 22e8d7265f..6fba0345fe 100644 --- a/examples/user_data/versions.tf +++ b/examples/user_data/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } } } diff --git a/main.tf b/main.tf index 011a26c396..3ed5cdeea2 100644 --- a/main.tf +++ b/main.tf @@ -345,9 +345,10 @@ resource "aws_eks_addon" "this" { cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = lookup(each.value, "addon_version", null) - resolve_conflicts = lookup(each.value, "resolve_conflicts", null) - service_account_role_arn = lookup(each.value, "service_account_role_arn", null) + addon_version = try(each.value.addon_version, 
data.aws_eks_addon_version.this[0].version) + preserve = try(each.value.preserve, null) + resolve_conflicts = try(each.value.resolve_conflicts, null) + service_account_role_arn = try(each.value.service_account_role_arn, null) depends_on = [ module.fargate_profile, @@ -358,6 +359,14 @@ resource "aws_eks_addon" "this" { tags = var.tags } +data "aws_eks_addon_version" "this" { + for_each = { for k, v in var.cluster_addons : k => v if local.create } + + addon_name = try(each.value.name, each.key) + kubernetes_version = aws_eks_cluster.this[0].version + most_recent = try(each.value.most_recent, null) +} + ################################################################################ # EKS Identity Provider # Note - this is different from IRSA diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md index 87da77b447..0853fd9e1a 100644 --- a/modules/_user_data/README.md +++ b/modules/_user_data/README.md @@ -9,7 +9,7 @@ See [`examples/user_data/`](https://github.com/terraform-aws-modules/terraform-a | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [terraform](#requirement\_terraform) | >= 1.0 | | [cloudinit](#requirement\_cloudinit) | >= 2.0 | ## Providers diff --git a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf index e293dc67ce..2dbd12cdc0 100644 --- a/modules/_user_data/versions.tf +++ b/modules/_user_data/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { cloudinit = { diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index eb8f35dbd3..d4ab0a8ebd 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -53,14 +53,14 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | 
>= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | ## Modules diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf index 22e8d7265f..6fba0345fe 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index 97dd0d3e53..42925d7c32 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -28,14 +28,14 @@ module "fargate_profile" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | ## Modules diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index 22e8d7265f..6fba0345fe 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } } } diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index eb0b484443..246f61061b 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -39,14 +39,14 @@ module "self_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | 
-| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.7 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.7 | ## Modules diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index 22e8d7265f..6fba0345fe 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } } } diff --git a/versions.tf b/versions.tf index fde7af0f23..7b9aed5fa9 100644 --- a/versions.tf +++ b/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.7" } tls = { source = "hashicorp/tls" From ad645c45af6443ad738a41b63be34a7ca3d24f2f Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 13:18:20 -0400 Subject: [PATCH 03/33] refactor: Allow both static and computed additional policies to be attached to roles --- README.md | 2 +- examples/complete/main.tf | 30 ++++++++++++++++++-- main.tf | 12 +++++--- modules/eks-managed-node-group/README.md | 2 +- modules/eks-managed-node-group/main.tf | 28 +++++++++--------- modules/eks-managed-node-group/variables.tf | 4 +-- modules/fargate-profile/README.md | 2 +- modules/fargate-profile/main.tf | 20 +++++++------ modules/fargate-profile/variables.tf | 4 +-- modules/self-managed-node-group/README.md | 2 +- modules/self-managed-node-group/main.tf | 26 ++++++++--------- modules/self-managed-node-group/variables.tf | 4 +-- node_groups.tf | 6 ++-- variables.tf | 4 +-- 14 files changed, 89 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index db0c24aef9..bf2a156083 100644 
--- a/README.md +++ b/README.md @@ -314,7 +314,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled. Defaults to `true` | `bool` | `true` | no | | [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | | [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 112e3b531d..e95fe143fb 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -60,6 +60,10 @@ module "eks" { kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true + iam_role_additional_policies = { + "additional" = aws_iam_policy.additional.arn + } + vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets @@ -100,8 +104,10 @@ module "eks" { # Self Managed Node Group(s) self_managed_node_group_defaults = { - vpc_security_group_ids = [aws_security_group.additional.id] - iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"] + vpc_security_group_ids = 
[aws_security_group.additional.id] + iam_role_additional_policies = { + "additional" = aws_iam_policy.additional.arn + } } self_managed_node_groups = { @@ -134,6 +140,9 @@ module "eks" { attach_cluster_primary_security_group = true vpc_security_group_ids = [aws_security_group.additional.id] + iam_role_additional_policies = { + "additional" = aws_iam_policy.additional.arn + } } eks_managed_node_groups = { @@ -383,3 +392,20 @@ resource "aws_security_group" "additional" { tags = local.tags } + +resource "aws_iam_policy" "additional" { + name = "${local.name}-additional" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) +} diff --git a/main.tf b/main.tf index 3ed5cdeea2..5f1a0a1491 100644 --- a/main.tf +++ b/main.tf @@ -291,10 +291,14 @@ resource "aws_iam_role" "this" { # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html resource "aws_iam_role_policy_attachment" "this" { - for_each = local.create_iam_role ? 
toset(compact(distinct(concat([ - "${local.policy_arn_prefix}/AmazonEKSClusterPolicy", - "${local.policy_arn_prefix}/AmazonEKSVPCResourceController", - ], var.iam_role_additional_policies)))) : toset([]) + for_each = { for k, v in merge( + { + AmazonEKSClusterPolicy = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy", + AmazonEKSVPCResourceController = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController", + } + , + var.iam_role_additional_policies + ) : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index d4ab0a8ebd..5aae8f1f02 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -117,7 +117,7 @@ module "eks_managed_node_group" { | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | | [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `{}` | no | | [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false` | `string` | `null` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index f55630876e..a2d271ad91 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -1,5 +1,4 @@ data "aws_partition" "current" {} - data "aws_caller_identity" "current" {} ################################################################################ @@ -36,10 +35,8 @@ locals { # 3. `var.create_launch_template = true && var.launch_template_name == ""` => Custom LT will be used, module will provide a default name # 4. `var.create_launch_template = true && var.launch_template_name == "something"` => Custom LT will be used, LT name is provided by user use_custom_launch_template = var.create_launch_template || var.launch_template_name != "" - - launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") - - security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") + security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { @@ -407,11 +404,9 @@ resource "aws_security_group_rule" "this" { ################################################################################ locals { - iam_role_name = coalesce(var.iam_role_name, "${var.name}-eks-node-group") - + iam_role_name = coalesce(var.iam_role_name, "${var.name}-eks-node-group") iam_role_policy_prefix = 
"arn:${data.aws_partition.current.partition}:iam::aws:policy" - - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } data "aws_iam_policy_document" "assume_role_policy" { @@ -445,11 +440,16 @@ resource "aws_iam_role" "this" { # Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group resource "aws_iam_role_policy_attachment" "this" { - for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([ - "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", - "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", - var.iam_role_attach_cni_policy ? 
local.cni_policy : "", - ], var.iam_role_additional_policies)))) : toset([]) + for_each = { for k, v in merge( + { + AmazonEKSWorkerNodePolicy = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" + AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" + }, + { + for k, v in { AmazonEKS_CNI_Policy = local.cni_policy } : k => v if var.iam_role_attach_cni_policy + }, + var.iam_role_additional_policies + ) : k => v if var.create && var.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 0000827719..5a6bd10a20 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -480,8 +480,8 @@ variable "iam_role_attach_cni_policy" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } variable "iam_role_tags" { diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index 42925d7c32..4b361afbd4 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -60,7 +60,7 @@ No modules. 
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no | | [create](#input\_create) | Determines whether to create Fargate profile or not | `bool` | `true` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the Fargate profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf index 554b0e8814..9dd01e54b0 100644 --- a/modules/fargate-profile/main.tf +++ b/modules/fargate-profile/main.tf @@ -1,13 +1,10 @@ data "aws_partition" "current" {} - data "aws_caller_identity" "current" {} locals { - iam_role_name = coalesce(var.iam_role_name, var.name, "fargate-profile") - + iam_role_name = coalesce(var.iam_role_name, var.name, "fargate-profile") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - - cni_policy = var.cluster_ip_family == "ipv6" ? 
"arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } ################################################################################ @@ -44,10 +41,15 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([ - "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy", - var.iam_role_attach_cni_policy ? local.cni_policy : "", - ], var.iam_role_additional_policies)))) : toset([]) + for_each = { for k, v in merge( + { + AmazonEKSFargatePodExecutionRolePolicy = "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy" + }, + { + for k, v in { AmazonEKS_CNI_Policy = local.cni_policy } : k => v if var.iam_role_attach_cni_policy + }, + var.iam_role_additional_policies + ) : k => v if var.create && var.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf index 4d5e95ccbe..e22279dc6b 100644 --- a/modules/fargate-profile/variables.tf +++ b/modules/fargate-profile/variables.tf @@ -70,8 +70,8 @@ variable "iam_role_attach_cni_policy" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } variable "iam_role_tags" { diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 246f61061b..6531beecf0 100644 --- a/modules/self-managed-node-group/README.md +++ 
b/modules/self-managed-node-group/README.md @@ -113,7 +113,7 @@ module "self_managed_node_group" { | [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no | | [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `{}` | no | | [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | | [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index f74fb8189e..caf0271ae1 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -1,7 +1,5 @@ data "aws_partition" "current" {} - data "aws_caller_identity" "current" {} - data "aws_default_tags" "current" {} data "aws_ami" "eks_default" { @@ -44,8 +42,7 @@ module "user_data" { locals { launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-node-group") - - security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { @@ -503,11 +500,9 @@ resource "aws_security_group_rule" "this" { ################################################################################ locals { - iam_role_name = coalesce(var.iam_role_name, "${var.name}-node-group") - + iam_role_name = coalesce(var.iam_role_name, "${var.name}-node-group") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + cni_policy = var.cluster_ip_family == "ipv6" ? 
"arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } data "aws_iam_policy_document" "assume_role_policy" { @@ -540,11 +535,16 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = var.create && var.create_iam_instance_profile ? toset(compact(distinct(concat([ - "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", - "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", - var.iam_role_attach_cni_policy ? local.cni_policy : "", - ], var.iam_role_additional_policies)))) : toset([]) + for_each = { for k, v in merge( + { + AmazonEKSWorkerNodePolicy = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" + AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" + }, + { + for k, v in { AmazonEKS_CNI_Policy = local.cni_policy } : k => v if var.iam_role_attach_cni_policy + }, + var.iam_role_additional_policies + ) : k => v if var.create && var.create_iam_instance_profile } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index 3734d9068c..4cf6eec918 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -598,8 +598,8 @@ variable "iam_role_attach_cni_policy" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } variable "iam_role_tags" { diff --git a/node_groups.tf b/node_groups.tf index 402191f162..70ebbfc274 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -218,7 +218,7 @@ module "fargate_profile" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, 
var.fargate_profile_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.fargate_profile_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.fargate_profile_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, []) + iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, {}) tags = merge(var.tags, try(each.value.tags, var.fargate_profile_defaults.tags, {})) } @@ -315,7 +315,7 @@ module "eks_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.eks_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.eks_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.eks_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, []) + iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, {}) # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, []))) @@ -446,7 +446,7 @@ module "self_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.self_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.self_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, 
var.self_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, []) + iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, {}) # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) diff --git a/variables.tf b/variables.tf index 3807618e40..79bb8b48e0 100644 --- a/variables.tf +++ b/variables.tf @@ -404,8 +404,8 @@ variable "iam_role_permissions_boundary" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } # TODO - hopefully this can be removed once the AWS endpoint is named properly in China From ba892fa23a9bcedb3f365450f8f5543d7c3417a8 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 13:48:40 -0400 Subject: [PATCH 04/33] refactor: Remove empty, default security group created per node group --- examples/complete/README.md | 1 + examples/complete/main.tf | 2 - examples/eks_managed_node_group/main.tf | 26 --------- examples/self_managed_node_group/main.tf | 28 ---------- modules/eks-managed-node-group/README.md | 12 ---- modules/eks-managed-node-group/main.tf | 57 +------------------ modules/eks-managed-node-group/outputs.tf | 14 ----- modules/eks-managed-node-group/variables.tf | 52 ----------------- modules/self-managed-node-group/README.md | 12 ---- modules/self-managed-node-group/main.tf | 59 +------------------- modules/self-managed-node-group/outputs.tf | 14 ----- modules/self-managed-node-group/variables.tf | 52 ----------------- node_groups.tf | 22 +------- 13 files changed, 8 insertions(+), 343 deletions(-) diff --git 
a/examples/complete/README.md b/examples/complete/README.md index 53ad154790..55db516d25 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -61,6 +61,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Type | |------|------| +| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | ## Inputs diff --git a/examples/complete/main.tf b/examples/complete/main.tf index e95fe143fb..33cbca4dee 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -263,7 +263,6 @@ module "eks_managed_node_group" { cluster_name = module.eks.cluster_id cluster_version = module.eks.cluster_version - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id vpc_security_group_ids = [ @@ -284,7 +283,6 @@ module "self_managed_node_group" { instance_type = "m5.large" - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets vpc_security_group_ids = [ module.eks.cluster_primary_security_group_id, diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 12db52be62..f007a79ded 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -315,32 +315,6 @@ module "eks" { "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" ] - create_security_group = true - security_group_name = "eks-managed-node-group-complete-example" - security_group_use_name_prefix = false - security_group_description = "EKS managed node group complete example security group" - security_group_rules = { - phoneOut = { - description = "Hello CloudFlare" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - cidr_blocks = 
["1.1.1.1/32"] - } - phoneHome = { - description = "Hello cluster" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - source_cluster_security_group = true # bit of reflection lookup - } - } - security_group_tags = { - Purpose = "Protector of the kubelet" - } - tags = { ExtraTag = "EKS managed node group complete example" } diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 116dbc2553..bef32a9af1 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -96,8 +96,6 @@ module "eks" { } self_managed_node_group_defaults = { - create_security_group = false - # enable discovery of autoscaling groups by cluster-autoscaler autoscaling_group_tags = { "k8s.io/cluster-autoscaler/enabled" : true, @@ -271,32 +269,6 @@ module "eks" { "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" ] - create_security_group = true - security_group_name = "self-managed-node-group-complete-example" - security_group_use_name_prefix = false - security_group_description = "Self managed node group complete example security group" - security_group_rules = { - phoneOut = { - description = "Hello CloudFlare" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - cidr_blocks = ["1.1.1.1/32"] - } - phoneHome = { - description = "Hello cluster" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - source_cluster_security_group = true # bit of reflection lookup - } - } - security_group_tags = { - Purpose = "Protector of the kubelet" - } - timeouts = { create = "80m" update = "80m" diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 5aae8f1f02..0bf446095c 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -76,8 +76,6 @@ module "eks_managed_node_group" { | 
[aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | @@ -98,14 +96,12 @@ module "eks_managed_node_group" { | [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). 
This is the security group that is automatically created by the EKS service | `string` | `null` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no | | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no | | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no | | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no | @@ -149,11 +145,6 @@ module "eks_managed_node_group" { | [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. 
Not used when `platform` = `bottlerocket` | `string` | `""` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | | [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `any` | `{}` | no | -| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS managed node group security group"` | no | -| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no | -| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no | -| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no | -| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no | | [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | | [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `any` | `{}` | no | @@ -162,7 +153,6 @@ module "eks_managed_node_group" { | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch templates default version on each update. 
Conflicts with `launch_template_default_version` | `bool` | `true` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | -| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no | | [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no | ## Outputs @@ -183,6 +173,4 @@ module "eks_managed_node_group" { | [node\_group\_resources](#output\_node\_group\_resources) | List of objects containing information about underlying resources | | [node\_group\_status](#output\_node\_group\_status) | Status of the EKS Node Group | | [node\_group\_taints](#output\_node\_group\_taints) | List of objects containing information about taints applied to the node group | -| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group | -| [security\_group\_id](#output\_security\_group\_id) | ID of the security group | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index a2d271ad91..cbf46c3642 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -36,7 +36,7 @@ locals { # 4. 
`var.create_launch_template = true && var.launch_template_name == "something"` => Custom LT will be used, LT name is provided by user use_custom_launch_template = var.create_launch_template || var.launch_template_name != "" launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") - security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { @@ -246,10 +246,9 @@ resource "aws_launch_template" "this" { create_before_destroy = true } - # Prevent premature access of security group roles and policies by pods that + # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes depends_on = [ - aws_security_group_rule.this, aws_iam_role_policy_attachment.this, ] @@ -347,58 +346,6 @@ resource "aws_eks_node_group" "this" { ) } -################################################################################ -# Security Group -################################################################################ - -locals { - security_group_name = coalesce(var.security_group_name, "${var.name}-eks-node-group") - create_security_group = var.create && var.create_security_group -} - -resource "aws_security_group" "this" { - count = local.create_security_group ? 1 : 0 - - name = var.security_group_use_name_prefix ? null : local.security_group_name - name_prefix = var.security_group_use_name_prefix ? 
"${local.security_group_name}-" : null - description = var.security_group_description - vpc_id = var.vpc_id - - tags = merge( - var.tags, - { "Name" = local.security_group_name }, - var.security_group_tags - ) - - # https://github.com/hashicorp/terraform-provider-aws/issues/2445 - # https://github.com/hashicorp/terraform-provider-aws/issues/9692 - lifecycle { - create_before_destroy = true - } -} - -resource "aws_security_group_rule" "this" { - for_each = { for k, v in var.security_group_rules : k => v if local.create_security_group } - - # Required - security_group_id = aws_security_group.this[0].id - protocol = each.value.protocol - from_port = each.value.from_port - to_port = each.value.to_port - type = each.value.type - - # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? 
var.cluster_security_group_id : null - ) -} - ################################################################################ # IAM Role ################################################################################ diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf index 9d7535332f..c9bffb3ec9 100644 --- a/modules/eks-managed-node-group/outputs.tf +++ b/modules/eks-managed-node-group/outputs.tf @@ -61,20 +61,6 @@ output "node_group_taints" { value = try(aws_eks_node_group.this[0].taint, []) } -################################################################################ -# Security Group -################################################################################ - -output "security_group_arn" { - description = "Amazon Resource Name (ARN) of the security group" - value = try(aws_security_group.this[0].arn, "") -} - -output "security_group_id" { - description = "ID of the security group" - value = try(aws_security_group.this[0].id, "") -} - ################################################################################ # IAM Role ################################################################################ diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 5a6bd10a20..7c2e14ee8c 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -368,58 +368,6 @@ variable "timeouts" { default = {} } -################################################################################ -# Security Group -################################################################################ - -variable "create_security_group" { - description = "Determines whether to create a security group" - type = bool - default = true -} - -variable "security_group_name" { - description = "Name to use on security group created" - type = string - default = null -} - -variable "security_group_use_name_prefix" { - description = 
"Determines whether the security group name (`security_group_name`) is used as a prefix" - type = bool - default = true -} - -variable "security_group_description" { - description = "Description for the security group created" - type = string - default = "EKS managed node group security group" -} - -variable "vpc_id" { - description = "ID of the VPC where the security group/nodes will be provisioned" - type = string - default = null -} - -variable "security_group_rules" { - description = "List of security group rules to add to the security group created" - type = any - default = {} -} - -variable "cluster_security_group_id" { - description = "Cluster control plane security group ID" - type = string - default = null -} - -variable "security_group_tags" { - description = "A map of additional tags to add to the security group created" - type = map(string) - default = {} -} - ################################################################################ # IAM Role ################################################################################ diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 6531beecf0..1d3ac272d0 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -64,8 +64,6 @@ module "self_managed_node_group" { | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| 
[aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_default_tags.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | @@ -88,7 +86,6 @@ module "self_managed_node_group" { | [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `""` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). 
This is the security group that is automatically created by the EKS service | `string` | `null` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no | @@ -96,7 +93,6 @@ module "self_managed_node_group" { | [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no | | [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no | | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no | | [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no | @@ -152,11 +148,6 @@ module "self_managed_node_group" { | [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. 
The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | | [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no | -| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS self-managed node group security group"` | no | -| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no | -| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no | -| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no | -| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no | | [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no | | [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones` | `list(string)` | `null` | no | | [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. 
Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `[]` | no | @@ -168,7 +159,6 @@ module "self_managed_node_group" { | [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | -| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no | | [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no | | [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior. | `string` | `null` | no | | [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior. 
| `number` | `null` | no | @@ -202,7 +192,5 @@ module "self_managed_node_group" { | [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template | | [launch\_template\_name](#output\_launch\_template\_name) | The name of the launch template | | [platform](#output\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | -| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group | -| [security\_group\_id](#output\_security\_group\_id) | ID of the security group | | [user\_data](#output\_user\_data) | Base64 encoded user data | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index caf0271ae1..2f044cd03c 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -42,7 +42,7 @@ module "user_data" { locals { launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-node-group") - security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { @@ -239,10 +239,9 @@ resource "aws_launch_template" "this" { create_before_destroy = true } - # Prevent premature access of security group roles and policies by pods that + # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes depends_on = [ - aws_security_group_rule.this, aws_iam_role_policy_attachment.this, ] @@ -441,60 +440,6 @@ resource "aws_autoscaling_schedule" "this" { recurrence = lookup(each.value, "recurrence", null) } -################################################################################ -# Security Group 
-################################################################################ - -locals { - security_group_name = coalesce(var.security_group_name, "${var.name}-node-group") - create_security_group = var.create && var.create_security_group -} - -resource "aws_security_group" "this" { - count = local.create_security_group ? 1 : 0 - - name = var.security_group_use_name_prefix ? null : local.security_group_name - name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}-" : null - description = var.security_group_description - vpc_id = var.vpc_id - - tags = merge( - var.tags, - { - "Name" = local.security_group_name - }, - var.security_group_tags - ) - - # https://github.com/hashicorp/terraform-provider-aws/issues/2445 - # https://github.com/hashicorp/terraform-provider-aws/issues/9692 - lifecycle { - create_before_destroy = true - } -} - -resource "aws_security_group_rule" "this" { - for_each = { for k, v in var.security_group_rules : k => v if local.create_security_group } - - # Required - security_group_id = aws_security_group.this[0].id - protocol = each.value.protocol - from_port = each.value.from_port - to_port = each.value.to_port - type = each.value.type - - # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? 
var.cluster_security_group_id : null - ) -} - ################################################################################ # IAM Role ################################################################################ diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf index e9f52db14f..f10816b522 100644 --- a/modules/self-managed-node-group/outputs.tf +++ b/modules/self-managed-node-group/outputs.tf @@ -90,20 +90,6 @@ output "autoscaling_group_schedule_arns" { value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } } -################################################################################ -# Security Group -################################################################################ - -output "security_group_arn" { - description = "Amazon Resource Name (ARN) of the security group" - value = try(aws_security_group.this[0].arn, "") -} - -output "security_group_id" { - description = "ID of the security group" - value = try(aws_security_group.this[0].id, "") -} - ################################################################################ # IAM Role ################################################################################ diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index 4cf6eec918..eb7ec029fe 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -486,58 +486,6 @@ variable "schedules" { default = {} } -################################################################################ -# Security Group -################################################################################ - -variable "create_security_group" { - description = "Determines whether to create a security group" - type = bool - default = true -} - -variable "security_group_name" { - description = "Name to use on security group created" - type = string - default = null -} - -variable 
"security_group_use_name_prefix" { - description = "Determines whether the security group name (`security_group_name`) is used as a prefix" - type = bool - default = true -} - -variable "security_group_description" { - description = "Description for the security group created" - type = string - default = "EKS self-managed node group security group" -} - -variable "vpc_id" { - description = "ID of the VPC where the security group/nodes will be provisioned" - type = string - default = null -} - -variable "security_group_rules" { - description = "List of security group rules to add to the security group created" - type = any - default = {} -} - -variable "cluster_security_group_id" { - description = "Cluster control plane security group ID" - type = string - default = null -} - -variable "security_group_tags" { - description = "A map of additional tags to add to the security group created" - type = map(string) - default = {} -} - ################################################################################ # IAM Role ################################################################################ diff --git a/node_groups.tf b/node_groups.tf index 70ebbfc274..9bfe27cf7b 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -234,10 +234,9 @@ module "eks_managed_node_group" { create = try(each.value.create, true) - cluster_name = aws_eks_cluster.this[0].name - cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version) - cluster_security_group_id = local.cluster_security_group_id - cluster_ip_family = var.cluster_ip_family + cluster_name = aws_eks_cluster.this[0].name + cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version) + cluster_ip_family = var.cluster_ip_family # EKS Managed Node Group name = try(each.value.name, each.key) @@ -320,13 +319,6 @@ module "eks_managed_node_group" { # Security group 
vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, []))) cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.eks_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null - create_security_group = try(each.value.create_security_group, var.eks_managed_node_group_defaults.create_security_group, true) - security_group_name = try(each.value.security_group_name, var.eks_managed_node_group_defaults.security_group_name, null) - security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.eks_managed_node_group_defaults.security_group_use_name_prefix, true) - security_group_description = try(each.value.security_group_description, var.eks_managed_node_group_defaults.security_group_description, "EKS managed node group security group") - vpc_id = try(each.value.vpc_id, var.eks_managed_node_group_defaults.vpc_id, var.vpc_id) - security_group_rules = try(each.value.security_group_rules, var.eks_managed_node_group_defaults.security_group_rules, {}) - security_group_tags = try(each.value.security_group_tags, var.eks_managed_node_group_defaults.security_group_tags, {}) tags = merge(var.tags, try(each.value.tags, var.eks_managed_node_group_defaults.tags, {})) } @@ -450,15 +442,7 @@ module "self_managed_node_group" { # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) - cluster_security_group_id = local.cluster_security_group_id cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.self_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? 
aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null - create_security_group = try(each.value.create_security_group, var.self_managed_node_group_defaults.create_security_group, true) - security_group_name = try(each.value.security_group_name, var.self_managed_node_group_defaults.security_group_name, null) - security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.self_managed_node_group_defaults.security_group_use_name_prefix, true) - security_group_description = try(each.value.security_group_description, var.self_managed_node_group_defaults.security_group_description, "Self managed node group security group") - vpc_id = try(each.value.vpc_id, var.self_managed_node_group_defaults.vpc_id, var.vpc_id) - security_group_rules = try(each.value.security_group_rules, var.self_managed_node_group_defaults.security_group_rules, {}) - security_group_tags = try(each.value.security_group_tags, var.self_managed_node_group_defaults.security_group_tags, {}) tags = merge(var.tags, try(each.value.tags, var.self_managed_node_group_defaults.tags, {})) } From fc800203b97ca59e6adb705ca551355e4ee61bdd Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 13:49:59 -0400 Subject: [PATCH 05/33] chore: Remove Karpenter example from examples --- examples/karpenter/README.md | 121 --------------- examples/karpenter/main.tf | 254 -------------------------------- examples/karpenter/outputs.tf | 187 ----------------------- examples/karpenter/variables.tf | 0 examples/karpenter/versions.tf | 18 --- 5 files changed, 580 deletions(-) delete mode 100644 examples/karpenter/README.md delete mode 100644 examples/karpenter/main.tf delete mode 100644 examples/karpenter/outputs.tf delete mode 100644 examples/karpenter/variables.tf delete mode 100644 examples/karpenter/versions.tf diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md deleted file mode 100644 index 98f31ad110..0000000000 --- a/examples/karpenter/README.md +++ 
/dev/null @@ -1,121 +0,0 @@ -# Karpenter Example - -Configuration in this directory creates an AWS EKS cluster with [Karpenter](https://karpenter.sh/) provisioned for managing compute resource scaling. - -## Usage - -To run this example you need to execute: - -```bash -$ terraform init -$ terraform plan -$ terraform apply -``` - -Once the cluster is up and running, you can check that Karpenter is functioning as intended with the following command: - -```bash -# First, make sure you have updated your local kubeconfig -aws eks --region eu-west-1 update-kubeconfig --name ex-karpenter - -# Second, scale the example deployment -kubectl scale deployment inflate --replicas 5 - -# You can watch Karpenter's controller logs with -kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller -``` - -You should see a new node named `karpenter.sh/provisioner-name/default` eventually come up in the console; this was provisioned by Karpenter in response to the scaled deployment above. - -### Tear Down & Clean-Up - -Because Karpenter manages the state of node resources outside of Terraform, Karpenter created resources will need to be de-provisioned first before removing the remaining resources with Terraform. - -1. Remove the example deployment created above and any nodes created by Karpenter - -```bash -kubectl delete deployment inflate -kubectl delete node -l karpenter.sh/provisioner-name=default -``` - -2. Remove the resources created by Terraform - -```bash -terraform destroy -``` - -Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. 
- - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | -| [helm](#requirement\_helm) | >= 2.4 | -| [kubectl](#requirement\_kubectl) | >= 1.14 | - -## Providers - -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | >= 4.7 | -| [helm](#provider\_helm) | >= 2.4 | -| [kubectl](#provider\_kubectl) | >= 1.14 | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [eks](#module\_eks) | ../.. | n/a | -| [karpenter\_irsa](#module\_karpenter\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.21.1 | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | - -## Resources - -| Name | Type | -|------|------| -| [aws_iam_instance_profile.karpenter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | -| [helm_release.karpenter](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | -| [kubectl_manifest.karpenter_example_deployment](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | -| [kubectl_manifest.karpenter_provisioner](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | -| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | - -## Inputs - -No inputs. 
- -## Outputs - -| Name | Description | -|------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | -| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | -| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | -| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | -| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | -| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | -| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | -| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | -| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | -| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | -| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | -| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | -| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. 
Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | -| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | -| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | -| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | -| [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | -| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | -| [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | -| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | -| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group | -| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group | -| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | -| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` | -| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created | -| [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups | - diff --git 
a/examples/karpenter/main.tf b/examples/karpenter/main.tf deleted file mode 100644 index 3f43d80b2a..0000000000 --- a/examples/karpenter/main.tf +++ /dev/null @@ -1,254 +0,0 @@ -provider "aws" { - region = local.region -} - -data "aws_partition" "current" {} - -locals { - name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.22" - region = "eu-west-1" - partition = data.aws_partition.current.partition - - tags = { - Example = local.name - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } -} - -################################################################################ -# EKS Module -################################################################################ - -module "eks" { - source = "../.." - - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets - - node_security_group_additional_rules = { - # Control plane invoke Karpenter webhook - ingress_karpenter_webhook_tcp = { - description = "Control plane invoke Karpenter webhook" - protocol = "tcp" - from_port = 8443 - to_port = 8443 - type = "ingress" - source_cluster_security_group = true - } - } - - eks_managed_node_groups = { - karpenter = { - instance_types = ["t3.medium"] - - min_size = 1 - max_size = 2 - desired_size = 1 - - iam_role_additional_policies = [ - # Required by Karpenter - "arn:${local.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" - ] - } - } - - tags = merge(local.tags, { - # NOTE - if creating multiple security groups with this module, only tag the - # security group that Karpenter should utilize with the following tag - # (i.e. 
- at most, only one security group should have this tag in your account) - "karpenter.sh/discovery" = local.name - }) -} - -################################################################################ -# Karpenter -################################################################################ - -provider "helm" { - kubernetes { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] - } - } -} - -provider "kubectl" { - apply_retry_count = 5 - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - load_config_file = false - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] - } -} - -module "karpenter_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - version = "~> 4.21.1" - - role_name = "karpenter-controller-${local.name}" - attach_karpenter_controller_policy = true - - karpenter_controller_cluster_id = module.eks.cluster_id - karpenter_controller_ssm_parameter_arns = [ - "arn:${local.partition}:ssm:*:*:parameter/aws/service/*" - ] - karpenter_controller_node_iam_role_arns = [ - module.eks.eks_managed_node_groups["karpenter"].iam_role_arn - ] - - oidc_providers = { - ex = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["karpenter:karpenter"] - } - } -} - -resource "aws_iam_instance_profile" "karpenter" { - name = "KarpenterNodeInstanceProfile-${local.name}" - role = module.eks.eks_managed_node_groups["karpenter"].iam_role_name -} - 
-resource "helm_release" "karpenter" { - namespace = "karpenter" - create_namespace = true - - name = "karpenter" - repository = "https://charts.karpenter.sh" - chart = "karpenter" - version = "0.8.2" - - set { - name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" - value = module.karpenter_irsa.iam_role_arn - } - - set { - name = "clusterName" - value = module.eks.cluster_id - } - - set { - name = "clusterEndpoint" - value = module.eks.cluster_endpoint - } - - set { - name = "aws.defaultInstanceProfile" - value = aws_iam_instance_profile.karpenter.name - } -} - -# Workaround - https://github.com/hashicorp/terraform-provider-kubernetes/issues/1380#issuecomment-967022975 -resource "kubectl_manifest" "karpenter_provisioner" { - yaml_body = <<-YAML - apiVersion: karpenter.sh/v1alpha5 - kind: Provisioner - metadata: - name: default - spec: - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["spot"] - limits: - resources: - cpu: 1000 - provider: - subnetSelector: - karpenter.sh/discovery: ${local.name} - securityGroupSelector: - karpenter.sh/discovery: ${local.name} - tags: - karpenter.sh/discovery: ${local.name} - ttlSecondsAfterEmpty: 30 - YAML - - depends_on = [ - helm_release.karpenter - ] -} - -# Example deployment using the [pause image](https://www.ianlewis.org/en/almighty-pause-container) -# and starts with zero replicas -resource "kubectl_manifest" "karpenter_example_deployment" { - yaml_body = <<-YAML - apiVersion: apps/v1 - kind: Deployment - metadata: - name: inflate - spec: - replicas: 0 - selector: - matchLabels: - app: inflate - template: - metadata: - labels: - app: inflate - spec: - terminationGracePeriodSeconds: 0 - containers: - - name: inflate - image: public.ecr.aws/eks-distro/kubernetes/pause:3.2 - resources: - requests: - cpu: 1 - YAML - - depends_on = [ - helm_release.karpenter - ] -} - -################################################################################ -# Supporting Resources 
-################################################################################ - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = "10.0.0.0/16" - - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - # Tags subnets for Karpenter auto-discovery - "karpenter.sh/discovery" = local.name - } - - tags = local.tags -} diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf deleted file mode 100644 index dbbec2334e..0000000000 --- a/examples/karpenter/outputs.tf +++ /dev/null @@ -1,187 +0,0 @@ -################################################################################ -# Cluster -################################################################################ - -output "cluster_arn" { - description = "The Amazon Resource Name (ARN) of the cluster" - value = module.eks.cluster_arn -} - -output "cluster_certificate_authority_data" { - description = "Base64 encoded certificate data required to communicate with the cluster" - value = module.eks.cluster_certificate_authority_data -} - -output "cluster_endpoint" { - description = "Endpoint for your Kubernetes API server" - value = module.eks.cluster_endpoint -} - -output "cluster_id" { - description = "The name/id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_id -} - -output "cluster_oidc_issuer_url" { - description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = module.eks.cluster_oidc_issuer_url -} - -output "cluster_platform_version" { - description = "Platform version for the cluster" - value = module.eks.cluster_platform_version -} - -output "cluster_status" { - description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" - value = module.eks.cluster_status -} - -output "cluster_primary_security_group_id" { - description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" - value = module.eks.cluster_primary_security_group_id -} - -################################################################################ -# Security Group -################################################################################ - -output "cluster_security_group_arn" { - description = "Amazon Resource Name (ARN) of the cluster security group" - value = module.eks.cluster_security_group_arn -} - -output "cluster_security_group_id" { - description = "ID of the cluster security group" - value = module.eks.cluster_security_group_id -} - -################################################################################ -# Node Security Group -################################################################################ - -output "node_security_group_arn" { - description = "Amazon Resource Name (ARN) of the node shared security group" - value = module.eks.node_security_group_arn -} - -output "node_security_group_id" { - description = "ID of the node shared security group" - value = module.eks.node_security_group_id -} - -################################################################################ -# IRSA 
-################################################################################ - -output "oidc_provider" { - description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" - value = module.eks.oidc_provider -} - -output "oidc_provider_arn" { - description = "The ARN of the OIDC Provider if `enable_irsa = true`" - value = module.eks.oidc_provider_arn -} - -output "cluster_tls_certificate_sha1_fingerprint" { - description = "The SHA1 fingerprint of the public key of the cluster's certificate" - value = module.eks.cluster_tls_certificate_sha1_fingerprint -} - -################################################################################ -# IAM Role -################################################################################ - -output "cluster_iam_role_name" { - description = "IAM role name of the EKS cluster" - value = module.eks.cluster_iam_role_name -} - -output "cluster_iam_role_arn" { - description = "IAM role ARN of the EKS cluster" - value = module.eks.cluster_iam_role_arn -} - -output "cluster_iam_role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = module.eks.cluster_iam_role_unique_id -} - -################################################################################ -# EKS Addons -################################################################################ - -output "cluster_addons" { - description = "Map of attribute maps for all EKS cluster addons enabled" - value = module.eks.cluster_addons -} - -################################################################################ -# EKS Identity Provider -################################################################################ - -output "cluster_identity_providers" { - description = "Map of attribute maps for all EKS identity providers enabled" - value = module.eks.cluster_identity_providers -} - -################################################################################ -# CloudWatch Log Group 
-################################################################################ - -output "cloudwatch_log_group_name" { - description = "Name of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_name -} - -output "cloudwatch_log_group_arn" { - description = "Arn of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_arn -} - -################################################################################ -# Fargate Profile -################################################################################ - -output "fargate_profiles" { - description = "Map of attribute maps for all EKS Fargate Profiles created" - value = module.eks.fargate_profiles -} - -################################################################################ -# EKS Managed Node Group -################################################################################ - -output "eks_managed_node_groups" { - description = "Map of attribute maps for all EKS managed node groups created" - value = module.eks.eks_managed_node_groups -} - -output "eks_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by EKS managed node groups" - value = module.eks.eks_managed_node_groups_autoscaling_group_names -} - -################################################################################ -# Self Managed Node Group -################################################################################ - -output "self_managed_node_groups" { - description = "Map of attribute maps for all self managed node groups created" - value = module.eks.self_managed_node_groups -} - -output "self_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by self-managed node groups" - value = module.eks.self_managed_node_groups_autoscaling_group_names -} - -################################################################################ -# Additional 
-################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/karpenter/variables.tf b/examples/karpenter/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf deleted file mode 100644 index ca6b5e11bf..0000000000 --- a/examples/karpenter/versions.tf +++ /dev/null @@ -1,18 +0,0 @@ -terraform { - required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 4.7" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.4" - } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.14" - } - } -} From aec153285185cd5bda6eeafc1a00ebea580f96bc Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 13:59:21 -0400 Subject: [PATCH 06/33] chore: Update local variable name to align with sub-modules --- main.tf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/main.tf b/main.tf index 5f1a0a1491..40d110ff66 100644 --- a/main.tf +++ b/main.tf @@ -226,9 +226,9 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" { ################################################################################ locals { - create_iam_role = local.create && var.create_iam_role - iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster") - policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" + create_iam_role = local.create && var.create_iam_role + iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster") + iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, 
"${local.iam_role_name}-ClusterEncryption") @@ -293,8 +293,8 @@ resource "aws_iam_role" "this" { resource "aws_iam_role_policy_attachment" "this" { for_each = { for k, v in merge( { - AmazonEKSClusterPolicy = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy", - AmazonEKSVPCResourceController = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController", + AmazonEKSClusterPolicy = "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", + AmazonEKSVPCResourceController = "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", } , var.iam_role_additional_policies From 6b8c4ec62d2ba7e5009e299aa880049e3b232531 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 14:05:09 -0400 Subject: [PATCH 07/33] feat: Change default behavior of KMS key creation to true --- README.md | 2 +- variables.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bf2a156083..29201e6350 100644 --- a/README.md +++ b/README.md @@ -305,7 +305,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no | | [create\_cni\_ipv6\_iam\_policy](#input\_create\_cni\_ipv6\_iam\_policy) | Determines whether to create an [`AmazonEKS_CNI_IPv6_Policy`](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy) | `bool` | `false` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `false` | no | +| [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `true` | no | | [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no | | [custom\_oidc\_thumbprints](#input\_custom\_oidc\_thumbprints) | Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s) | `list(string)` | `[]` | no | | [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no | diff --git a/variables.tf b/variables.tf index 79bb8b48e0..7b65b7dd20 100644 --- a/variables.tf +++ b/variables.tf @@ -123,7 +123,7 @@ variable "cluster_timeouts" { variable "create_kms_key" { description = "Controls if a KMS key for cluster encryption should be created" type = bool - default = false + default = true } variable "kms_key_description" { From 780a737446fb155b0c34fd952e606a7a70619f31 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 20 Aug 2022 14:38:10 -0400 Subject: [PATCH 
08/33] feat: Update documentation for v19.x changes --- README.md | 23 +++++--- docs/UPGRADE-19.0.md | 100 +++++++++++++++++++++++++++++++++++ docs/network_connectivity.md | 3 +- main.tf | 2 +- 4 files changed, 118 insertions(+), 10 deletions(-) create mode 100644 docs/UPGRADE-19.0.md diff --git a/README.md b/README.md index 29201e6350..18cbb3625d 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ Terraform module which creates AWS EKS (Kubernetes) resources - Upgrade Guides - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md) - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md) + - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md) ### External Documentation @@ -42,13 +43,22 @@ An IAM role for service accounts (IRSA) sub-module has been created to make depl Some of the addon/controller policies that are currently supported include: +- [Cert-Manager](https://cert-manager.io/docs/configuration/acme/dns01/route53/#set-up-an-iam-role) - [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) -- [External DNS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) - [EBS CSI Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json) -- [VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html) -- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler#5-create-an-iam-role-for-the-pods) -- [Karpenter](https://karpenter.sh/preview/getting-started/getting-started-with-terraform/#create-the-karpentercontroller-iam-role) +- [EFS CSI Driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json) +- [External 
DNS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) +- [External Secrets](https://github.com/external-secrets/kubernetes-external-secrets#add-a-secret) +- [FSx for Lustre CSI Driver](https://github.com/kubernetes-sigs/aws-fsx-csi-driver/blob/master/docs/README.md) +- [Karpenter](https://github.com/aws/karpenter/blob/main/website/content/en/preview/getting-started/cloudformation.yaml) - [Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/install/iam_policy.json) + - [Load Balancer Controller Target Group Binding Only](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/#iam-permission-subset-for-those-who-use-targetgroupbinding-only-and-dont-plan-to-use-the-aws-load-balancer-controller-to-manage-security-group-rules) +- [App Mesh Controller](https://github.com/aws/aws-app-mesh-controller-for-k8s/blob/master/config/iam/controller-iam-policy.json) + - [App Mesh Envoy Proxy](https://raw.githubusercontent.com/aws/aws-app-mesh-controller-for-k8s/master/config/iam/envoy-iam-policy.json) +- [Managed Service for Prometheus](https://docs.aws.amazon.com/prometheus/latest/userguide/set-up-irsa.html) +- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler#5-create-an-iam-role-for-the-pods) +- [Velero](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) +- [VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html) See [terraform-aws-iam/modules/iam-role-for-service-accounts](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) for current list of supported addon/controller policies as more are added to the project. 
@@ -57,7 +67,7 @@ See [terraform-aws-iam/modules/iam-role-for-service-accounts](https://github.com ```hcl module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 18.0" + version = "~> 19.0" cluster_name = "my-cluster" cluster_version = "1.22" @@ -191,7 +201,6 @@ module "eks" { - [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations - [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups - [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) -- [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for managing compute resource scaling - [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data @@ -226,7 +235,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple |------|--------|---------| | [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a | | [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a | -| [kms](#module\_kms) | terraform-aws-modules/kms/aws | 1.0.2 | +| [kms](#module\_kms) | terraform-aws-modules/kms/aws | 1.1.0 | | [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a | ## Resources diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md new file mode 100644 index 0000000000..b0de07656b --- /dev/null +++ b/docs/UPGRADE-19.0.md @@ -0,0 +1,100 @@ +# Upgrade from v18.x to v19.x + +Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce. + +## List of backwards incompatible changes + +- Minimum supported version of Terraform AWS provider updated to v4.7 to support latest features in autoscaling groups +- Individual security group created per EKS managed node group or self managed node group has been removed. This feature was largely un-used, often caused confusion, and can readily be replaced by a user provided security group that was externally created + +## Additional changes + +### Added + +- + +### Modified + +- `block_device_mappings` previously required a map of maps but has since changed to an array of maps. Users can remove the outer key for each block device mapping and replace the outermost map `{}` with an array `[]`. There are not state changes required for this change + +### Removed + +- + +### Variable and output changes + +1. 
Removed variables: + + - Self managed node groups: + - `create_security_group` + - `security_group_name` + - `security_group_use_name_prefix` + - `security_group_description` + - `security_group_rules` + - `security_group_tags` + - `cluster_security_group_id` + - `vpc_id` + - EKS managed node groups: + - `create_security_group` + - `security_group_name` + - `security_group_use_name_prefix` + - `security_group_description` + - `security_group_rules` + - `security_group_tags` + - `cluster_security_group_id` + - `vpc_id` + +2. Renamed variables: + + - + +3. Added variables: + + - Self managed node groups: + - + - EKS managed node groups: + - + +4. Removed outputs: + + - Self managed node groups: + - `security_group_arn` + - `security_group_id` + - EKS managed node groups: + - `security_group_arn` + - `security_group_id` + +5. Renamed outputs: + + - + +6. Added outputs: + + - + +## Upgrade Migrations + +### Self Managed Node Groups + +#### 1. [v18.x] Remove Security Group Created by Node Group + +Self managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the cluster security group and/or the shared node security group). While still on the `v18.x` of your module definition, remove this security group from your node groups. + +- If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. Once this is in place, you can proceed with the original security group removal. +- For most users, the security group is not used and can be safely removed. However, deployed instances will have the security group attached and require removal of the security group. 
Because instances are deployed via autoscaling groups, we cannot simply remove the security group from code and have those changes reflected on the instances. Instead, we have to update the code and then force the autoscaling groups to refresh so that new instances are provisioned without the security group attached. You can utilize the `instance_refresh` parameter to force nodes to re-deploy when removing the security group since changes to launch templates automatically trigger an instance refresh. An example configuration is provided below. + - Add the following to either/or `self_managed_node_group_defaults`/`eks_managed_node_group_defaults`: + ```hcl + create_security_group = false + instance_refresh = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 100 + } + } + ``` + - It is recommended to use the `aws-node-termination-handler` while performing this update. Please refer to the [`irsa-autoscale-refresh` example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/20af82846b4a1f23f3787a8c455f39c0b6164d80/examples/irsa_autoscale_refresh/charts.tf#L86) for usage. This will ensure that pods are safely evicted in a controlled manner to avoid service disruptions. + - The alternative is to manually detach the security groups from instances so that they can be deleted. Note: security groups cannot be deleted if they are still attached to an ENI. + +#### EKS Managed Node Groups + +EKS managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the cluster security group and/or the shared node security group). However, unlike self managed node groups, EKS managed node groups by default rollout changes using a [rolling update strategy](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-update-behavior.html) that can be influenced through `update_config`. 
No additional changes are required for removing the security group created by node groups. diff --git a/docs/network_connectivity.md b/docs/network_connectivity.md index 67805aa77c..53668e88c2 100644 --- a/docs/network_connectivity.md +++ b/docs/network_connectivity.md @@ -20,8 +20,7 @@ Please refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/u - Lastly, users are able to opt in to attaching the primary security group automatically created by the EKS service by setting `attach_cluster_primary_security_group` = `true` from the root module for the respective node group (or set it within the node group defaults). This security group is not managed by the module; it is created by the EKS service. It permits all traffic within the domain of the security group as well as all egress traffic to the internet. - Node Group Security Group(s) - - Each node group (EKS Managed Node Group and Self Managed Node Group) by default creates its own security group. By default, this security group does not contain any additional security group rules. It is merely an "empty container" that offers users the ability to opt into any addition inbound our outbound rules as necessary - - Users also have the option to supply their own, and/or additional, externally created security group(s) to the node group as well via the `vpc_security_group_ids` variable + - Users have the option to supply their own, and/or additional, externally created security group(s) to the node group via the `vpc_security_group_ids` variable See the example snippet below which adds additional security group rules to the cluster security group as well as the shared node security group (for node-to-node access). 
Users can use this extensibility to open up network access as they see fit using the security groups provided by the module: diff --git a/main.tf b/main.tf index 40d110ff66..ae3a4edf56 100644 --- a/main.tf +++ b/main.tf @@ -93,7 +93,7 @@ resource "aws_cloudwatch_log_group" "this" { module "kms" { source = "terraform-aws-modules/kms/aws" - version = "1.0.2" # Note - be mindful of Terraform/provider version compatibility between modules + version = "1.1.0" # Note - be mindful of Terraform/provider version compatibility between modules create = local.create && var.create_kms_key From 002be2f28667e576923caedf05e11f7103ab92bf Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Thu, 25 Aug 2022 13:30:08 -0400 Subject: [PATCH 09/33] feat: Update defaults, add variable for custom launch template usage, update examples to consolidate --- README.md | 27 +-- docs/compute_resources.md | 54 +++-- docs/faq.md | 1 - docs/irsa_integration.md | 2 +- docs/user_data.md | 18 +- examples/complete/README.md | 1 + examples/complete/main.tf | 34 +-- examples/eks_managed_node_group/README.md | 13 +- examples/eks_managed_node_group/main.tf | 217 +++++++------------ examples/eks_managed_node_group/versions.tf | 4 - examples/fargate_profile/README.md | 1 + examples/fargate_profile/main.tf | 34 +-- examples/self_managed_node_group/README.md | 9 +- examples/self_managed_node_group/main.tf | 191 +++++++--------- examples/self_managed_node_group/versions.tf | 4 - examples/user_data/README.md | 1 - examples/user_data/main.tf | 64 +++--- examples/user_data/versions.tf | 7 - modules/eks-managed-node-group/README.md | 5 +- modules/eks-managed-node-group/main.tf | 16 +- modules/eks-managed-node-group/variables.tf | 6 + modules/fargate-profile/README.md | 4 +- modules/self-managed-node-group/README.md | 7 +- node_groups.tf | 1 + variables.tf | 10 +- 25 files changed, 315 insertions(+), 416 deletions(-) diff --git a/README.md b/README.md index 18cbb3625d..2e14cde62f 100644 --- a/README.md +++ b/README.md 
@@ -70,9 +70,8 @@ module "eks" { version = "~> 19.0" cluster_name = "my-cluster" - cluster_version = "1.22" + cluster_version = "1.23" - cluster_endpoint_private_access = true cluster_endpoint_public_access = true cluster_addons = { @@ -85,21 +84,17 @@ module "eks" { } } - cluster_encryption_config = [{ - provider_key_arn = "arn:aws:kms:eu-west-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" - resources = ["secrets"] - }] - - vpc_id = "vpc-1234556abcdef" - subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] + vpc_id = "vpc-1234556abcdef" + subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] + control_plane_subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] # Self Managed Node Group(s) self_managed_node_group_defaults = { instance_type = "m6i.large" update_launch_template_default_version = true - iam_role_additional_policies = [ - "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" - ] + iam_role_additional_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } } self_managed_node_groups = { @@ -283,14 +278,14 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no | | [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator"
]
| no | -| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(any)` | `[]` | no | +| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(any)` |
[
{
"resources": [
"secrets"
]
}
]
| no | | [cluster\_encryption\_policy\_description](#input\_cluster\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no | | [cluster\_encryption\_policy\_name](#input\_cluster\_encryption\_policy\_name) | Name to use on cluster encryption policy created | `string` | `null` | no | | [cluster\_encryption\_policy\_path](#input\_cluster\_encryption\_policy\_path) | Cluster encryption policy path | `string` | `null` | no | | [cluster\_encryption\_policy\_tags](#input\_cluster\_encryption\_policy\_tags) | A map of additional tags to add to the cluster encryption policy created | `map(string)` | `{}` | no | | [cluster\_encryption\_policy\_use\_name\_prefix](#input\_cluster\_encryption\_policy\_use\_name\_prefix) | Determines whether cluster encryption policy name (`cluster_encryption_policy_name`) is used as a prefix | `bool` | `true` | no | -| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no | -| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `true` | no | +| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `true` | no | +| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `false` | no | | [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` |
[
"0.0.0.0/0"
]
| no | | [cluster\_iam\_role\_dns\_suffix](#input\_cluster\_iam\_role\_dns\_suffix) | Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China) | `string` | `null` | no | | [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no | @@ -305,7 +300,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | | [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no | | [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | -| [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.22`) | `string` | `null` | no | +| [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.23`) | `string` | `null` | no | | [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane | `list(string)` | `[]` | no | | [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no | | [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. 
NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | diff --git a/docs/compute_resources.md b/docs/compute_resources.md index 51d41025bb..a93fd2178a 100644 --- a/docs/compute_resources.md +++ b/docs/compute_resources.md @@ -18,8 +18,7 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ ```hcl eks_managed_node_groups = { default = { - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false } } ``` @@ -29,9 +28,6 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ ```hcl eks_managed_node_groups = { bottlerocket_default = { - create_launch_template = false - launch_template_name = "" - ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" } @@ -45,15 +41,15 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ prepend_userdata = { # See issue https://github.com/awslabs/amazon-eks-ami/issues/844 pre_bootstrap_user_data = <<-EOT - #!/bin/bash - set -ex - cat <<-EOF > /etc/profile.d/bootstrap.sh - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false - export KUBELET_EXTRA_ARGS="--max-pods=110" - EOF - # Source extra environment variables in bootstrap script - sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh + #!/bin/bash + set -ex + cat <<-EOF > /etc/profile.d/bootstrap.sh + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false + export KUBELET_EXTRA_ARGS="--max-pods=110" + EOF + # Source extra environment variables in bootstrap script + sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh EOT } } @@ -68,9 +64,9 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ platform = "bottlerocket" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] 
- lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } } @@ -116,17 +112,17 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ enable_bootstrap_user_data = true # this will get added to the template bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" - [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + [settings.kubernetes.node-taints] + "dedicated" = "experimental:PreferNoSchedule" + "special" = "true:NoSchedule" EOT } } @@ -141,9 +137,9 @@ Refer to the [Self Managed Node Group documentation](https://docs.aws.amazon.com 1. The `self-managed-node-group` uses the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version by default: ```hcl - cluster_version = "1.22" + cluster_version = "1.23" - # This self managed node group will use the latest AWS EKS Optimized AMI for Kubernetes 1.22 + # This self managed node group will use the latest AWS EKS Optimized AMI for Kubernetes 1.23 self_managed_node_groups = { default = {} } @@ -152,7 +148,7 @@ Refer to the [Self Managed Node Group documentation](https://docs.aws.amazon.com 2. To use Bottlerocket, specify the `platform` as `bottlerocket` and supply a Bottlerocket OS AMI: ```hcl - cluster_version = "1.22" + cluster_version = "1.23" self_managed_node_groups = { bottlerocket = { diff --git a/docs/faq.md b/docs/faq.md index 4a73b7a8d7..7926cc6d6a 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -81,7 +81,6 @@ TL;DR - Terraform resource passed into the modules map definition _must_ be know - `cluster_security_group_additional_rules` (i.e. 
- referencing an external security group resource in a rule) - `node_security_group_additional_rules` (i.e. - referencing an external security group resource in a rule) -- `iam_role_additional_policies` (i.e. - referencing an external policy resource) ### Why are nodes not being registered? diff --git a/docs/irsa_integration.md b/docs/irsa_integration.md index 6c78bd9575..8ab8b39648 100644 --- a/docs/irsa_integration.md +++ b/docs/irsa_integration.md @@ -8,7 +8,7 @@ module "eks" { source = "terraform-aws-modules/eks/aws" cluster_name = "example" - cluster_version = "1.22" + cluster_version = "1.23" cluster_addons = { vpc-cni = { diff --git a/docs/user_data.md b/docs/user_data.md index e5c247b798..4ce9a13ac4 100644 --- a/docs/user_data.md +++ b/docs/user_data.md @@ -55,15 +55,15 @@ Since the EKS Managed Node Group service provides the necessary bootstrap user d ```hcl # See issue https://github.com/awslabs/amazon-eks-ami/issues/844 pre_bootstrap_user_data = <<-EOT - #!/bin/bash - set -ex - cat <<-EOF > /etc/profile.d/bootstrap.sh - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false - export KUBELET_EXTRA_ARGS="--max-pods=110" - EOF - # Source extra environment variables in bootstrap script - sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh + #!/bin/bash + set -ex + cat <<-EOF > /etc/profile.d/bootstrap.sh + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false + export KUBELET_EXTRA_ARGS="--max-pods=110" + EOF + # Source extra environment variables in bootstrap script + sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh EOT ``` diff --git a/examples/complete/README.md b/examples/complete/README.md index 55db516d25..b61893c538 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -63,6 +63,7 @@ Note that this example may create resources which cost money. 
Run `terraform des |------|------| | [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | ## Inputs diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 33cbca4dee..c8c80d0622 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -20,10 +20,15 @@ provider "kubernetes" { } } +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -38,9 +43,8 @@ locals { module "eks" { source = "../.." 
- cluster_name = local.name - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_endpoint_public_access = true cluster_addons = { coredns = { @@ -118,17 +122,17 @@ module "eks" { } pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - cd /tmp - sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm - sudo systemctl enable amazon-ssm-agent - sudo systemctl start amazon-ssm-agent + cd /tmp + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + sudo systemctl enable amazon-ssm-agent + sudo systemctl start amazon-ssm-agent EOT } } @@ -169,7 +173,7 @@ module "eks" { } update_config = { - max_unavailable_percentage = 50 # or set `max_unavailable` + max_unavailable_percentage = 33 # or set `max_unavailable` } tags = { @@ -345,12 +349,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - intra_subnets = ["10.0.7.0/28", "10.0.7.16/28", "10.0.7.32/28"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 03f3c91020..e570b7f38c 100644 --- a/examples/eks_managed_node_group/README.md +++ 
b/examples/eks_managed_node_group/README.md @@ -60,22 +60,22 @@ Note that this example may create resources which cost money. Run `terraform des | [terraform](#requirement\_terraform) | >= 1.0 | | [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.7 | -| [tls](#provider\_tls) | >= 3.0 | ## Modules | Name | Source | Version | |------|--------|---------| +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.1 | | [eks](#module\_eks) | ../.. | n/a | +| [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | -| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.12 | +| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 | ## Resources @@ -83,19 +83,14 @@ Note that this example may create resources which cost money. 
Run `terraform des |------|------| | [aws_autoscaling_group_tag.cluster_autoscaler_label_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group_tag) | resource | | [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | -| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | -| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | 
[aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index f007a79ded..8b58a430b7 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -14,11 +14,17 @@ provider "kubernetes" { } } +data "aws_caller_identity" "current" {} +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.22" + cluster_version = "1.23" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -26,8 +32,6 @@ locals { } } -data "aws_caller_identity" "current" {} - ################################################################################ # EKS Module ################################################################################ @@ -35,10 +39,9 @@ data "aws_caller_identity" "current" {} module "eks" { source = "../.." 
- cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true # IPV6 cluster_ip_family = "ipv6" @@ -62,10 +65,13 @@ module "eks" { } } + # Encryption key + create_kms_key = true cluster_encryption_config = [{ - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] + resources = ["secrets"] }] + kms_key_deletion_window_in_days = 7 + enable_kms_key_rotation = true cluster_tags = { # This should not affect the name of the cluster primary security group @@ -74,8 +80,9 @@ module "eks" { Name = local.name } - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets manage_aws_auth_configmap = true @@ -130,14 +137,13 @@ module "eks" { default_node_group = { # By default, the module creates a launch template to ensure tags are propagated to instances, etc., # so we need to disable it to use the default template provided by the AWS EKS managed node group service - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false disk_size = 50 # Remote access cannot be specified with a launch template remote_access = { - ec2_ssh_key = aws_key_pair.this.key_name + ec2_ssh_key = module.key_pair.key_pair_name source_security_group_ids = [aws_security_group.remote_access.id] } } @@ -146,8 +152,7 @@ module "eks" { bottlerocket_default = { # By default, the module creates a launch template to ensure tags are propagated to instances, etc., # so we need to disable it to use the default template provided by the AWS EKS managed node group service - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" @@ -158,11 
+163,11 @@ module "eks" { ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" - # this will get added to what AWS provides + # This will get added to what AWS provides bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -172,21 +177,21 @@ module "eks" { ami_id = data.aws_ami.eks_default_bottlerocket.image_id platform = "bottlerocket" - # use module user data template to boostrap + # Use module user data template to boostrap enable_bootstrap_user_data = true - # this will get added to the template + # This will get added to the template bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" - [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + [settings.kubernetes.node-taints] + "dedicated" = "experimental:PreferNoSchedule" + "special" = "true:NoSchedule" EOT } @@ -219,15 +224,15 @@ module "eks" { # See issue https://github.com/awslabs/amazon-eks-ami/issues/844 pre_bootstrap_user_data = <<-EOT - #!/bin/bash - set -ex - cat <<-EOF > /etc/profile.d/bootstrap.sh - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false - export KUBELET_EXTRA_ARGS="--max-pods=110" - EOF - # Source extra environment variables in bootstrap script - sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh + #!/bin/bash + set -ex + cat <<-EOF > /etc/profile.d/bootstrap.sh + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false + export KUBELET_EXTRA_ARGS="--max-pods=110" + EOF + # Source extra environment variables in bootstrap script + sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' 
/etc/eks/bootstrap.sh EOT } @@ -247,12 +252,12 @@ module "eks" { bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'" pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false EOT post_bootstrap_user_data = <<-EOT - echo "you are free little kubelet!" + echo "you are free little kubelet!" EOT capacity_type = "SPOT" @@ -272,7 +277,7 @@ module "eks" { ] update_config = { - max_unavailable_percentage = 50 # or set `max_unavailable` + max_unavailable_percentage = 33 # or set `max_unavailable` } description = "EKS managed node group example launch template" @@ -291,7 +296,7 @@ module "eks" { iops = 3000 throughput = 150 encrypted = true - kms_key_id = aws_kms_key.ebs.arn + kms_key_id = module.ebs_kms_key.key_id delete_on_termination = true } } @@ -311,9 +316,10 @@ module "eks" { iam_role_tags = { Purpose = "Protector of the kubelet" } - iam_role_additional_policies = [ - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ] + iam_role_additional_policies = { + AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + additional = aws_iam_policy.node_additional.arn + } tags = { ExtraTag = "EKS managed node group complete example" @@ -324,18 +330,6 @@ module "eks" { tags = local.tags } -# References to resources that do not exist yet when creating a cluster will cause a plan failure due to https://github.com/hashicorp/terraform/issues/4149 -# There are two options users can take -# 1. Create the dependent resources before the cluster => `terraform apply -target and then `terraform apply` -# Note: this is the route users will have to take for adding additonal security groups to nodes since there isn't a separate "security group attachment" resource -# 2. 
For addtional IAM policies, users can attach the policies outside of the cluster definition as demonstrated below -resource "aws_iam_role_policy_attachment" "additional" { - for_each = module.eks.eks_managed_node_groups - - policy_arn = aws_iam_policy.node_additional.arn - role = each.value.iam_role_name -} - ################################################################################ # Supporting Resources ################################################################################ @@ -345,11 +339,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_ipv6 = true assign_ipv6_address_on_creation = true @@ -381,7 +376,7 @@ module "vpc" { module "vpc_cni_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - version = "~> 4.12" + version = "~> 5.0" role_name_prefix = "VPC-CNI-IRSA" attach_vpc_cni_policy = true @@ -415,74 +410,27 @@ resource "aws_security_group" "additional" { tags = local.tags } -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" - deletion_window_in_days = 7 - enable_key_rotation = true +module "ebs_kms_key" { + source = "terraform-aws-modules/kms/aws" + version = "~> 1.1" - tags = local.tags -} + description = "Customer managed key to encrypt EKS managed node group volumes" -resource "aws_kms_key" "ebs" { - description = "Customer managed key to encrypt EKS managed node group volumes" - deletion_window_in_days = 7 - policy = data.aws_iam_policy_document.ebs.json -} + # 
Policy + key_administrators = [ + data.aws_caller_identity.current.arn + ] + key_service_users = [ + # required for the ASG to manage encrypted volumes for nodes + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", + # required for the cluster / persistentvolume-controller to create encrypted PVCs + module.eks.cluster_iam_role_arn, + ] -# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes -data "aws_iam_policy_document" "ebs" { - # Copy of default KMS policy that lets you manage it - statement { - sid = "Enable IAM User Permissions" - actions = ["kms:*"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } + # Aliases + aliases = ["eks/${local.name}/ebs"] - # Required for EKS - statement { - sid = "Allow service-linked role use of the CMK" - actions = [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - } - - statement { - sid = "Allow attachment of persistent resources" - actions = ["kms:CreateGrant"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to 
create encrypted PVCs - ] - } - - condition { - test = "Bool" - variable = "kms:GrantIsForAWSResource" - values = ["true"] - } - } + tags = local.tags } # This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx) @@ -559,13 +507,12 @@ resource "aws_launch_template" "external" { } } -resource "tls_private_key" "this" { - algorithm = "RSA" -} +module "key_pair" { + source = "terraform-aws-modules/key-pair/aws" + version = "~> 2.0" -resource "aws_key_pair" "this" { - key_name_prefix = local.name - public_key = tls_private_key.this.public_key_openssh + key_name_prefix = local.name + create_private_key = true tags = local.tags } diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index 7b9aed5fa9..248218ac69 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -6,10 +6,6 @@ terraform { source = "hashicorp/aws" version = ">= 4.7" } - tls = { - source = "hashicorp/tls" - version = ">= 3.0" - } kubernetes = { source = "hashicorp/kubernetes" version = ">= 2.10" diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 78d868ff9d..6aec324669 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -41,6 +41,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | ## Inputs diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 6c80e22c41..65f951e3cf 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -2,11 +2,16 @@ provider "aws" { region = local.region } +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.22" + cluster_version = "1.23" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -21,10 +26,8 @@ locals { module "eks" { source = "../.." - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version cluster_addons = { # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns @@ -37,13 +40,17 @@ module "eks" { } } + # Encryption key + create_kms_key = true cluster_encryption_config = [{ - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] + resources = ["secrets"] }] + kms_key_deletion_window_in_days = 7 + enable_kms_key_rotation = true - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets # You require a node group to schedule coredns which is critical for running correctly internal DNS. 
# If you want to use only fargate you must follow docs `(Optional) Update CoreDNS` @@ -126,11 +133,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index 47353b66a9..9874577a25 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -28,20 +28,20 @@ Note that this example may create resources which cost money. Run `terraform des | [terraform](#requirement\_terraform) | >= 1.0 | | [aws](#requirement\_aws) | >= 4.7 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.7 | -| [tls](#provider\_tls) | >= 3.0 | ## Modules | Name | Source | Version | |------|--------|---------| +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.1 | | [eks](#module\_eks) | ../.. | n/a | +| [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources @@ -49,15 +49,12 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| | [aws_ec2_capacity_reservation.targeted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_capacity_reservation) | resource | -| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | -| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index bef32a9af1..5f9c8ba10e 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -14,11 +14,17 @@ provider "kubernetes" { } } +data "aws_caller_identity" "current" {} +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = 
"1.22" + cluster_version = "1.23" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -26,8 +32,6 @@ locals { } } -data "aws_caller_identity" "current" {} - ################################################################################ # EKS Module ################################################################################ @@ -35,10 +39,9 @@ data "aws_caller_identity" "current" {} module "eks" { source = "../.." - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true cluster_addons = { coredns = { @@ -50,13 +53,17 @@ module "eks" { } } + # Encryption key + create_kms_key = true cluster_encryption_config = [{ - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] + resources = ["secrets"] }] + kms_key_deletion_window_in_days = 7 + enable_kms_key_rotation = true - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets # Self managed node groups will not automatically create the aws-auth configmap so we need to create_aws_auth_configmap = true @@ -115,24 +122,20 @@ module "eks" { ami_id = data.aws_ami.eks_default_bottlerocket.id instance_type = "m5.large" desired_size = 2 - key_name = aws_key_pair.this.key_name - - iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"] + key_name = module.key_pair.key_pair_name bootstrap_extra_args = <<-EOT - # The admin host container provides SSH access and runs with "superpowers". - # It is disabled by default, but can be disabled explicitly. 
- [settings.host-containers.admin] - enabled = false - - # The control host container provides out-of-band access via SSM. - # It is enabled by default, and can be disabled if you do not expect to use SSM. - # This could leave you with no way to access the API and change settings on an existing node! - [settings.host-containers.control] - enabled = true - - [settings.kubernetes.node-labels] - ingress = "allowed" + # extra args added + [settings.kernel] + lockdown = "integrity" + + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" + + [settings.kubernetes.node-taints] + "dedicated" = "experimental:PreferNoSchedule" + "special" = "true:NoSchedule" EOT } @@ -175,15 +178,14 @@ module "eks" { instance_type = "c5n.9xlarge" post_bootstrap_user_data = <<-EOT - - # Install EFA - curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz - tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer - ./efa_installer.sh -y --minimal - fi_info -p efa -t FI_EP_RDM - - # Disable ptrace - sysctl -w kernel.yama.ptrace_scope=0 + # Install EFA + curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz + tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer + ./efa_installer.sh -y --minimal + fi_info -p efa -t FI_EP_RDM + + # Disable ptrace + sysctl -w kernel.yama.ptrace_scope=0 EOT network_interfaces = [ @@ -212,12 +214,12 @@ module "eks" { bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false EOT post_bootstrap_user_data = <<-EOT - echo "you are free little kubelet!" + echo "you are free little kubelet!" 
EOT instance_type = "m6i.large" @@ -239,7 +241,7 @@ module "eks" { iops = 3000 throughput = 150 encrypted = true - kms_key_id = aws_kms_key.ebs.arn + kms_key_id = module.ebs_kms_key.key_id delete_on_termination = true } } @@ -265,9 +267,9 @@ module "eks" { iam_role_tags = { Purpose = "Protector of the kubelet" } - iam_role_additional_policies = [ - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ] + iam_role_additional_policies = { + AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + } timeouts = { create = "80m" @@ -293,11 +295,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true @@ -366,19 +369,37 @@ data "aws_ami" "eks_default_bottlerocket" { } } -resource "tls_private_key" "this" { - algorithm = "RSA" -} +module "key_pair" { + source = "terraform-aws-modules/key-pair/aws" + version = "~> 2.0" -resource "aws_key_pair" "this" { - key_name = local.name - public_key = tls_private_key.this.public_key_openssh + key_name_prefix = local.name + create_private_key = true + + tags = local.tags } -resource "aws_kms_key" "ebs" { - description = "Customer managed key to encrypt self managed node group volumes" - deletion_window_in_days = 7 - policy = data.aws_iam_policy_document.ebs.json +module "ebs_kms_key" { + source = "terraform-aws-modules/kms/aws" + version = "~> 1.1" + + description = "Customer managed key to encrypt EKS managed node group volumes" + 
+ # Policy + key_administrators = [ + data.aws_caller_identity.current.arn + ] + key_service_users = [ + # required for the ASG to manage encrypted volumes for nodes + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", + # required for the cluster / persistentvolume-controller to create encrypted PVCs + module.eks.cluster_iam_role_arn, + ] + + # Aliases + aliases = ["eks/${local.name}/ebs"] + + tags = local.tags } resource "aws_ec2_capacity_reservation" "targeted" { @@ -388,59 +409,3 @@ resource "aws_ec2_capacity_reservation" "targeted" { instance_count = 1 instance_match_criteria = "targeted" } - -# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes -data "aws_iam_policy_document" "ebs" { - # Copy of default KMS policy that lets you manage it - statement { - sid = "Enable IAM User Permissions" - actions = ["kms:*"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } - - # Required for EKS - statement { - sid = "Allow service-linked role use of the CMK" - actions = [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - } - - statement { - sid = "Allow attachment of persistent resources" - actions = ["kms:CreateGrant"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - - condition { - test = "Bool" - variable = "kms:GrantIsForAWSResource" - values = ["true"] - } - } -} diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index 7b9aed5fa9..248218ac69 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -6,10 +6,6 @@ terraform { source = "hashicorp/aws" version = ">= 4.7" } - tls = { - source = "hashicorp/tls" - version = ">= 3.0" - } kubernetes = { source = "hashicorp/kubernetes" version = ">= 2.10" diff --git a/examples/user_data/README.md b/examples/user_data/README.md index 7f2c5de23f..cea7dce755 100644 --- a/examples/user_data/README.md +++ b/examples/user_data/README.md @@ -18,7 +18,6 @@ $ terraform apply | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | ## Providers diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf index 5c5b1266cf..cb565ba04c 100644 --- a/examples/user_data/main.tf +++ b/examples/user_data/main.tf @@ -19,7 +19,7 @@ module "eks_mng_linux_additional" { source = "../../modules/_user_data" pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" + export CONTAINER_RUNTIME="containerd" EOT } @@ -34,14 +34,14 @@ module "eks_mng_linux_custom_ami" { enable_bootstrap_user_data = true pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false EOT bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20 --instance-type t3a.large'" 
post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -56,14 +56,14 @@ module "eks_mng_linux_custom_template" { user_data_template_path = "${path.module}/templates/linux_custom.tpl" pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -80,9 +80,9 @@ module "eks_mng_bottlerocket_additional" { platform = "bottlerocket" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -98,9 +98,9 @@ module "eks_mng_bottlerocket_custom_ami" { enable_bootstrap_user_data = true bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -116,9 +116,9 @@ module "eks_mng_bottlerocket_custom_template" { user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -140,14 +140,14 @@ module "self_mng_linux_bootstrap" { cluster_auth_base64 = local.cluster_auth_base64 pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -164,14 +164,14 @@ module "self_mng_linux_custom_template" { user_data_template_path = "${path.module}/templates/linux_custom.tpl" pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args 
'--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -197,9 +197,9 @@ module "self_mng_bottlerocket_bootstrap" { cluster_auth_base64 = local.cluster_auth_base64 bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -218,9 +218,9 @@ module "self_mng_bottlerocket_custom_template" { user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -246,13 +246,13 @@ module "self_mng_windows_bootstrap" { cluster_auth_base64 = local.cluster_auth_base64 pre_bootstrap_user_data = <<-EOT - [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT # I don't know if this is the right way on WindowsOS, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT - [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT } @@ -271,12 +271,12 @@ module "self_mng_windows_custom_template" { user_data_template_path = "${path.module}/templates/windows_custom.tpl" pre_bootstrap_user_data = <<-EOT - [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT # I don't know if this is the right way on WindowsOS, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT - [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT } diff --git 
a/examples/user_data/versions.tf b/examples/user_data/versions.tf index 6fba0345fe..7117131f4c 100644 --- a/examples/user_data/versions.tf +++ b/examples/user_data/versions.tf @@ -1,10 +1,3 @@ terraform { required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 4.7" - } - } } diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 0bf446095c..1d915fa6dd 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -10,7 +10,7 @@ module "eks_managed_node_group" { name = "separate-eks-mng" cluster_name = "my-cluster" - cluster_version = "1.22" + cluster_version = "1.23" vpc_id = "vpc-1234556abcdef" subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] @@ -18,7 +18,7 @@ module "eks_managed_node_group" { // The following variables are necessary if you decide to use the module outside of the parent EKS module context. // Without it, the security groups of the nodes are empty and thus won't join the cluster. cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id - cluster_security_group_id = module.eks.node_security_group_id + cluster_security_group_id = module.eks.node_security_group_id min_size = 1 max_size = 10 @@ -151,6 +151,7 @@ module "eks_managed_node_group" { | [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no | | [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `{}` | no | | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch templates default version on each update. 
Conflicts with `launch_template_default_version` | `bool` | `true` | no | +| [use\_custom\_launch\_template](#input\_use\_custom\_launch\_template) | Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | | [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index cbf46c3642..08fa854060 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -29,18 +29,12 @@ module "user_data" { ################################################################################ locals { - # There are 4 scenarios here that have to be considered for `use_custom_launch_template`: - # 1. `var.create_launch_template = false && var.launch_template_name == ""` => EKS MNG will use its own default LT - # 2. `var.create_launch_template = false && var.launch_template_name == "something"` => User provided custom LT will be used - # 3. `var.create_launch_template = true && var.launch_template_name == ""` => Custom LT will be used, module will provide a default name - # 4. 
`var.create_launch_template = true && var.launch_template_name == "something"` => Custom LT will be used, LT name is provided by user - use_custom_launch_template = var.create_launch_template || var.launch_template_name != "" - launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") - security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { - count = var.create && var.create_launch_template ? 1 : 0 + count = var.create && var.create_launch_template && var.use_custom_launch_template ? 1 : 0 name = var.launch_template_use_name_prefix ? null : local.launch_template_name_int name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name_int}-" : null @@ -289,13 +283,13 @@ resource "aws_eks_node_group" "this" { version = var.ami_id != "" ? null : var.cluster_version capacity_type = var.capacity_type - disk_size = local.use_custom_launch_template ? null : var.disk_size # if using LT, set disk size on LT or else it will error here + disk_size = var.use_custom_launch_template ? null : var.disk_size # if using LT, set disk size on LT or else it will error here force_update_version = var.force_update_version instance_types = var.instance_types labels = var.labels dynamic "launch_template" { - for_each = local.use_custom_launch_template ? [1] : [] + for_each = var.use_custom_launch_template ? 
[1] : [] content { name = local.launch_template_name version = local.launch_template_version diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 7c2e14ee8c..a29658886e 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -84,6 +84,12 @@ variable "create_launch_template" { default = true } +variable "use_custom_launch_template" { + description = "Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template" + type = bool + default = true +} + variable "launch_template_name" { description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)" type = string diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index 4b361afbd4..2caa77ad3d 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -8,8 +8,8 @@ Configuration in this directory creates a Fargate EKS Profile module "fargate_profile" { source = "terraform-aws-modules/eks/aws//modules/fargate-profile" - name = "separate-fargate-profile" - cluster_name = "my-cluster" + name = "separate-fargate-profile" + cluster_name = "my-cluster" subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] selectors = [{ diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 1d3ac272d0..f8098c4c33 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -10,14 +10,17 @@ module "self_managed_node_group" { name = "separate-self-mng" cluster_name = "my-cluster" - cluster_version = "1.22" + cluster_version = "1.23" cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com" cluster_auth_base64 = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" vpc_id = "vpc-1234556abcdef" subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] + // The following variables are necessary if you decide to use the module outside of the parent EKS module context. + // Without it, the security groups of the nodes are empty and thus won't join the cluster. 
vpc_security_group_ids = [ - # cluster_security_group_id, + module.eks.cluster_primary_security_group_id, + module.eks.cluster_security_group_id, ] min_size = 1 diff --git a/node_groups.tf b/node_groups.tf index 9bfe27cf7b..2e6ec255cf 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -276,6 +276,7 @@ module "eks_managed_node_group" { # Launch Template create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) + use_custom_launch_template = try(each.value.use_custom_launch_template, var.eks_managed_node_group_defaults.use_custom_launch_template, true) launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key) launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true) launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null) diff --git a/variables.tf b/variables.tf index 7b65b7dd20..2ccd3fcc0e 100644 --- a/variables.tf +++ b/variables.tf @@ -27,7 +27,7 @@ variable "cluster_name" { } variable "cluster_version" { - description = "Kubernetes `.` version to use for the EKS cluster (i.e.: `1.22`)" + description = "Kubernetes `.` version to use for the EKS cluster (i.e.: `1.23`)" type = string default = null } @@ -59,13 +59,13 @@ variable "subnet_ids" { variable "cluster_endpoint_private_access" { description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled" type = bool - default = false + default = true } variable "cluster_endpoint_public_access" { description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled" type = bool - default = true + default = false } variable "cluster_endpoint_public_access_cidrs" { @@ -89,7 +89,9 @@ variable "cluster_service_ipv4_cidr" { variable "cluster_encryption_config" 
{ description = "Configuration block with encryption configuration for the cluster" type = list(any) - default = [] + default = [{ + resources = ["secrets"] + }] } variable "attach_cluster_encryption_policy" { From c03f67c224bae016c347e6c292a9bd07a16d8374 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Mon, 26 Sep 2022 16:59:18 -0400 Subject: [PATCH 10/33] docs: Update docs for changes around security groups --- .github/images/security_groups.svg | 2 +- README.md | 9 ++++++++- docs/compute_resources.md | 2 ++ docs/network_connectivity.md | 2 +- variables.tf | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/images/security_groups.svg b/.github/images/security_groups.svg index 6b120e98ba..33fce81d9b 100644 --- a/.github/images/security_groups.svg +++ b/.github/images/security_groups.svg @@ -1 +1 @@ - + diff --git a/README.md b/README.md index 2e14cde62f..697deaab16 100644 --- a/README.md +++ b/README.md @@ -22,10 +22,17 @@ Please note that we strive to provide a comprehensive suite of documentation for - [AWS EKS Documentation](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) - [Kubernetes Documentation](https://kubernetes.io/docs/home/) +#### Reference Architecture + +The examples provided under `examples/` provide a comprehensive suite of configurations that demonstrate nearly all of the possible different configurations and settings that can be used with this module. However, these examples are not representative of clusters that you would normally find in use for production workloads. 
For reference architectures that utilize this module, please see the following: + +- [EKS Reference Architecture](https://github.com/clowdhaus/eks-reference-architecture) + ## Available Features - AWS EKS Cluster Addons - AWS EKS Identity Provider Configuration +- AWS EKS on Outposts support - All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported: - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) @@ -352,7 +359,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no | | [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs where the nodes/node groups will be provisioned. If `control_plane_subnet_ids` is not provided, the EKS cluster control plane (ENIs) will be provisioned in these subnets | `list(string)` | `[]` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and its nodes will be provisioned | `string` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster security group will be provisioned | `string` | `null` | no | ## Outputs diff --git a/docs/compute_resources.md b/docs/compute_resources.md index a93fd2178a..a49ee1ce87 100644 --- a/docs/compute_resources.md +++ b/docs/compute_resources.md @@ -28,6 +28,8 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ ```hcl eks_managed_node_groups = { bottlerocket_default = { + use_custom_launch_template = false + ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" } diff --git a/docs/network_connectivity.md b/docs/network_connectivity.md index 53668e88c2..9d38fc130d 100644 --- 
a/docs/network_connectivity.md +++ b/docs/network_connectivity.md @@ -20,7 +20,7 @@ Please refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/u - Lastly, users are able to opt in to attaching the primary security group automatically created by the EKS service by setting `attach_cluster_primary_security_group` = `true` from the root module for the respective node group (or set it within the node group defaults). This security group is not managed by the module; it is created by the EKS service. It permits all traffic within the domain of the security group as well as all egress traffic to the internet. - Node Group Security Group(s) - - Users have the option to supply their own, and/or additional, externally created security group(s) to the node group via the `vpc_security_group_ids` variable + - Users have the option to assign their own externally created security group(s) to the node group via the `vpc_security_group_ids` variable See the example snippet below which adds additional security group rules to the cluster security group as well as the shared node security group (for node-to-node access). 
Users can use this extensibility to open up network access as they see fit using the security groups provided by the module: diff --git a/variables.tf b/variables.tf index 2ccd3fcc0e..f9f607d449 100644 --- a/variables.tf +++ b/variables.tf @@ -233,7 +233,7 @@ variable "cluster_security_group_id" { } variable "vpc_id" { - description = "ID of the VPC where the cluster and its nodes will be provisioned" + description = "ID of the VPC where the cluster security group will be provisioned" type = string default = null } From 8b176dbe118893abc8a6b2dad15d7abcecb4379d Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Mon, 26 Sep 2022 18:56:44 -0400 Subject: [PATCH 11/33] feat: Add support for provisioning on outposts and addon timeout configurations --- README.md | 7 +++-- examples/complete/README.md | 4 +-- examples/complete/versions.tf | 2 +- examples/eks_managed_node_group/README.md | 4 +-- examples/eks_managed_node_group/versions.tf | 2 +- examples/fargate_profile/README.md | 4 +-- examples/fargate_profile/versions.tf | 2 +- examples/self_managed_node_group/README.md | 4 +-- examples/self_managed_node_group/versions.tf | 2 +- main.tf | 31 ++++++++++++++++---- modules/eks-managed-node-group/README.md | 4 +-- modules/eks-managed-node-group/versions.tf | 2 +- modules/fargate-profile/README.md | 4 +-- modules/fargate-profile/versions.tf | 2 +- modules/self-managed-node-group/README.md | 4 +-- modules/self-managed-node-group/versions.tf | 2 +- variables.tf | 18 ++++++++++++ versions.tf | 2 +- 18 files changed, 71 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 697deaab16..cec69c8394 100644 --- a/README.md +++ b/README.md @@ -219,7 +219,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -227,7 +227,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | | [kubernetes](#provider\_kubernetes) | >= 2.10 | | [tls](#provider\_tls) | >= 3.0 | @@ -284,6 +284,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no | | [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no | +| [cluster\_addons\_timeouts](#input\_cluster\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | `map(string)` | `{}` | no | | [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator"
]
| no | | [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(any)` |
[
{
"resources": [
"secrets"
]
}
]
| no | | [cluster\_encryption\_policy\_description](#input\_cluster\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no | @@ -353,7 +354,9 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no | | [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `bool` | `true` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | +| [outpost\_config](#input\_outpost\_config) | Configuration for the AWS Outpost to provision the cluster on | `any` | `{}` | no | | [prefix\_separator](#input\_prefix\_separator) | The separator to use between the prefix and the generated timestamp for resource names | `string` | `"-"` | no | +| [provision\_on\_outpost](#input\_provision\_on\_outpost) | Determines whether cluster should be provisioned on an AWS Outpost | `bool` | `false` | no | | [putin\_khuylo](#input\_putin\_khuylo) | Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo! 
| `bool` | `true` | no | | [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no | | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no | diff --git a/examples/complete/README.md b/examples/complete/README.md index b61893c538..03d45665ab 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -34,14 +34,14 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 248218ac69..bd119cf38c 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index e570b7f38c..afa4888d3f 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -58,14 +58,14 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index 248218ac69..bd119cf38c 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 6aec324669..dffeb5a1f8 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -20,14 +20,14 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index 248218ac69..bd119cf38c 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index 9874577a25..1014574c16 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -26,14 +26,14 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index 248218ac69..bd119cf38c 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/main.tf b/main.tf index ae3a4edf56..89c4ae4fea 100644 --- a/main.tf +++ b/main.tf @@ -28,13 +28,28 @@ resource "aws_eks_cluster" "this" { public_access_cidrs = var.cluster_endpoint_public_access_cidrs } - kubernetes_network_config { - ip_family = var.cluster_ip_family - service_ipv4_cidr = var.cluster_service_ipv4_cidr + dynamic "kubernetes_network_config" { + # Not valid on Outposts + for_each = var.provision_on_outpost ? [] : [1] + + content { + ip_family = var.cluster_ip_family + service_ipv4_cidr = var.cluster_service_ipv4_cidr + } + } + + dynamic "outpost_config" { + for_each = var.provision_on_outpost ? 
[var.outpost_config] : [] + + content { + control_plane_instance_type = outpost_config.value.control_plane_instance_type + outpost_arns = outpost_config.value.outpost_arns + } } dynamic "encryption_config" { - for_each = toset(var.cluster_encryption_config) + # Not available on Outposts + for_each = { for k, v in toset(var.cluster_encryption_config) : k => v if !var.provision_on_outpost } content { provider { @@ -95,7 +110,7 @@ module "kms" { source = "terraform-aws-modules/kms/aws" version = "1.1.0" # Note - be mindful of Terraform/provider version compatibility between modules - create = local.create && var.create_kms_key + create = local.create && var.create_kms_key && !var.provision_on_outpost # not valid on Outposts description = coalesce(var.kms_key_description, "${var.cluster_name} cluster encryption key") key_usage = "ENCRYPT_DECRYPT" @@ -354,6 +369,12 @@ resource "aws_eks_addon" "this" { resolve_conflicts = try(each.value.resolve_conflicts, null) service_account_role_arn = try(each.value.service_account_role_arn, null) + timeouts { + create = try(var.cluster_addons_timeouts.create, null) + update = try(var.cluster_addons_timeouts.update, null) + delete = try(var.cluster_addons_timeouts.delete, null) + } + depends_on = [ module.fargate_profile, module.eks_managed_node_group, diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 1d915fa6dd..736a1eabed 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -54,13 +54,13 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf index 
6fba0345fe..b12a7dd00b 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index 2caa77ad3d..a1c10aedac 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -29,13 +29,13 @@ module "fargate_profile" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index 6fba0345fe..b12a7dd00b 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } } } diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index f8098c4c33..df4b8f24bc 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -43,13 +43,13 @@ module "self_managed_node_group" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.7 | +| [aws](#requirement\_aws) | >= 4.32 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.7 | +| [aws](#provider\_aws) | >= 4.32 | ## Modules diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index 6fba0345fe..b12a7dd00b 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -4,7 +4,7 @@ terraform { 
required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } } } diff --git a/variables.tf b/variables.tf index f9f607d449..d2dac30c79 100644 --- a/variables.tf +++ b/variables.tf @@ -86,6 +86,18 @@ variable "cluster_service_ipv4_cidr" { default = null } +variable "provision_on_outpost" { + description = "Determines whether cluster should be provisioned on an AWS Outpost" + type = bool + default = false +} + +variable "outpost_config" { + description = "Configuration for the AWS Outpost to provision the cluster on" + type = any + default = {} +} + variable "cluster_encryption_config" { description = "Configuration block with encryption configuration for the cluster" type = list(any) @@ -464,6 +476,12 @@ variable "cluster_addons" { default = {} } +variable "cluster_addons_timeouts" { + description = "Create, update, and delete timeout configurations for the cluster addons" + type = map(string) + default = {} +} + ################################################################################ # EKS Identity Provider ################################################################################ diff --git a/versions.tf b/versions.tf index 7b9aed5fa9..02eb2b4080 100644 --- a/versions.tf +++ b/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.7" + version = ">= 4.32" } tls = { source = "hashicorp/tls" From 911fe42c6c060e0f76301dccee35053385a3eae0 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 27 Sep 2022 11:04:51 -0400 Subject: [PATCH 12/33] fix: Move additional IAM policies to separate attachment resource, update cluster encryption config --- README.md | 3 ++- examples/complete/main.tf | 4 ++-- examples/eks_managed_node_group/main.tf | 4 ++-- examples/fargate_profile/main.tf | 4 ++-- examples/self_managed_node_group/README.md | 1 + examples/self_managed_node_group/main.tf | 25 ++++++++++++++++++-- main.tf | 23 ++++++++++-------- 
modules/eks-managed-node-group/README.md | 1 + modules/eks-managed-node-group/main.tf | 22 ++++++++++-------- modules/fargate-profile/README.md | 1 + modules/fargate-profile/main.tf | 20 ++++++++-------- modules/self-managed-node-group/README.md | 1 + modules/self-managed-node-group/main.tf | 27 +++++++++++----------- variables.tf | 6 ++--- 14 files changed, 88 insertions(+), 54 deletions(-) diff --git a/README.md b/README.md index cec69c8394..75b2f5cc24 100644 --- a/README.md +++ b/README.md @@ -253,6 +253,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [aws_iam_policy.cluster_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_policy.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.cluster_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | @@ -286,7 +287,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. 
Addon name can be the map keys or set with `name` | `any` | `{}` | no | | [cluster\_addons\_timeouts](#input\_cluster\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | `map(string)` | `{}` | no | | [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator"
]
| no | -| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(any)` |
[
{
"resources": [
"secrets"
]
}
]
| no | +| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `any` |
{
"resources": [
"secrets"
]
}
| no | | [cluster\_encryption\_policy\_description](#input\_cluster\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no | | [cluster\_encryption\_policy\_name](#input\_cluster\_encryption\_policy\_name) | Name to use on cluster encryption policy created | `string` | `null` | no | | [cluster\_encryption\_policy\_path](#input\_cluster\_encryption\_policy\_path) | Cluster encryption policy path | `string` | `null` | no | diff --git a/examples/complete/main.tf b/examples/complete/main.tf index c8c80d0622..5c4a336507 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -58,9 +58,9 @@ module "eks" { # Encryption key create_kms_key = true - cluster_encryption_config = [{ + cluster_encryption_config = { resources = ["secrets"] - }] + } kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 8b58a430b7..fef36b51e9 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -67,9 +67,9 @@ module "eks" { # Encryption key create_kms_key = true - cluster_encryption_config = [{ + cluster_encryption_config = { resources = ["secrets"] - }] + } kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 65f951e3cf..a40595d11f 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -42,9 +42,9 @@ module "eks" { # Encryption key create_kms_key = true - cluster_encryption_config = [{ + cluster_encryption_config = { resources = ["secrets"] - }] + } kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index 1014574c16..f0a20c2342 100644 --- 
a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -49,6 +49,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Type | |------|------| | [aws_ec2_capacity_reservation.targeted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_capacity_reservation) | resource | +| [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 5f9c8ba10e..7369d5235b 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -55,9 +55,9 @@ module "eks" { # Encryption key create_kms_key = true - cluster_encryption_config = [{ + cluster_encryption_config = { resources = ["secrets"] - }] + } kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true @@ -269,6 +269,7 @@ module "eks" { } iam_role_additional_policies = { AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + additional = aws_iam_policy.node_additional.arn } timeouts = { @@ -409,3 +410,23 @@ resource "aws_ec2_capacity_reservation" "targeted" { instance_count = 1 instance_match_criteria = "targeted" } + +resource "aws_iam_policy" "node_additional" { + name = "${local.name}-additional" + description = "Example usage of node additional policy" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + 
Resource = "*" + }, + ] + }) + + tags = local.tags +} diff --git a/main.tf b/main.tf index 89c4ae4fea..e0e11cc5e6 100644 --- a/main.tf +++ b/main.tf @@ -49,7 +49,7 @@ resource "aws_eks_cluster" "this" { dynamic "encryption_config" { # Not available on Outposts - for_each = { for k, v in toset(var.cluster_encryption_config) : k => v if !var.provision_on_outpost } + for_each = length(var.cluster_encryption_config) > 0 && !var.provision_on_outpost ? [var.cluster_encryption_config] : [] content { provider { @@ -306,14 +306,17 @@ resource "aws_iam_role" "this" { # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in merge( - { - AmazonEKSClusterPolicy = "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", - AmazonEKSVPCResourceController = "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", - } - , - var.iam_role_additional_policies - ) : k => v if local.create_iam_role } + for_each = { for k, v in toset(compact([ + "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", + "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", + ])) : k => v if local.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name @@ -364,7 +367,7 @@ resource "aws_eks_addon" "this" { cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[0].version) + addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version) preserve = try(each.value.preserve, null) resolve_conflicts = try(each.value.resolve_conflicts, null) service_account_role_arn = 
try(each.value.service_account_role_arn, null) diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 736a1eabed..13f7eb1146 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -74,6 +74,7 @@ module "eks_managed_node_group" { |------|------| | [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index 08fa854060..26fd4b2d09 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -381,16 +381,18 @@ resource "aws_iam_role" "this" { # Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in merge( - { - AmazonEKSWorkerNodePolicy = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" - AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" - }, - { - for k, v in { AmazonEKS_CNI_Policy = local.cni_policy } : k => v if var.iam_role_attach_cni_policy - }, - 
var.iam_role_additional_policies - ) : k => v if var.create && var.create_iam_role } + for_each = { for k, v in toset(compact([ + "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", + "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", + var.iam_role_attach_cni_policy ? local.cni_policy : "", + ])) : k => v if var.create && var.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index a1c10aedac..e0dba7cb79 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -47,6 +47,7 @@ No modules. |------|------| | [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf index 9dd01e54b0..de9dd2d754 100644 --- a/modules/fargate-profile/main.tf +++ b/modules/fargate-profile/main.tf @@ 
-41,15 +41,17 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in merge( - { - AmazonEKSFargatePodExecutionRolePolicy = "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy" - }, - { - for k, v in { AmazonEKS_CNI_Policy = local.cni_policy } : k => v if var.iam_role_attach_cni_policy - }, - var.iam_role_additional_policies - ) : k => v if var.create && var.create_iam_role } + for_each = { for k, v in toset(compact([ + "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy", + var.iam_role_attach_cni_policy ? local.cni_policy : "", + ])) : k => v if var.create && var.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index df4b8f24bc..0a3d1778aa 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -65,6 +65,7 @@ module "self_managed_node_group" { | [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource | | [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | 
[aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index 2f044cd03c..ecfc29123a 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -480,22 +480,23 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in merge( - { - AmazonEKSWorkerNodePolicy = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" - AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" - }, - { - for k, v in { AmazonEKS_CNI_Policy = local.cni_policy } : k => v if var.iam_role_attach_cni_policy - }, - var.iam_role_additional_policies - ) : k => v if var.create && var.create_iam_instance_profile } + for_each = { for k, v in toset(compact([ + "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", + "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", + var.iam_role_attach_cni_policy ? local.cni_policy : "", + ])) : k => v if var.create && var.create_iam_instance_profile } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_instance_profile } policy_arn = each.value role = aws_iam_role.this[0].name } -# Only self-managed node group requires instance profile resource "aws_iam_instance_profile" "this" { count = var.create && var.create_iam_instance_profile ? 1 : 0 @@ -505,9 +506,9 @@ resource "aws_iam_instance_profile" "this" { name_prefix = var.iam_role_use_name_prefix ? 
"${local.iam_role_name}-" : null path = var.iam_role_path + tags = merge(var.tags, var.iam_role_tags) + lifecycle { create_before_destroy = true } - - tags = merge(var.tags, var.iam_role_tags) } diff --git a/variables.tf b/variables.tf index d2dac30c79..555f07bec3 100644 --- a/variables.tf +++ b/variables.tf @@ -100,10 +100,10 @@ variable "outpost_config" { variable "cluster_encryption_config" { description = "Configuration block with encryption configuration for the cluster" - type = list(any) - default = [{ + type = any + default = { resources = ["secrets"] - }] + } } variable "attach_cluster_encryption_policy" { From d3d4f38f3c8ae25232acc9a8491df1f1b66dc5b7 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 28 Sep 2022 09:57:19 -0400 Subject: [PATCH 13/33] chore: Remove VPC `"shared"` subnet tags which are no longer required --- examples/complete/main.tf | 6 ++--- examples/eks_managed_node_group/main.tf | 6 ++--- examples/fargate_profile/README.md | 1 + examples/fargate_profile/main.tf | 29 ++++++++++++++++++++---- examples/self_managed_node_group/main.tf | 6 ++--- 5 files changed, 32 insertions(+), 16 deletions(-) diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 5c4a336507..dbe5da28aa 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -365,13 +365,11 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index fef36b51e9..8166e9d8ad 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -362,13 +362,11 @@ module "vpc" { create_flow_log_cloudwatch_log_group = 
true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index dffeb5a1f8..0a942c43e3 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -40,6 +40,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Type | |------|------| +| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index a40595d11f..8c2375efd9 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -71,6 +71,12 @@ module "eks" { } } + fargate_profile_defaults = { + iam_role_additional_policies = { + "additional" = aws_iam_policy.additional.arn + } + } + fargate_profiles = { default = { name = "default" @@ -149,13 +155,11 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags @@ -168,3 +172,20 @@ resource "aws_kms_key" "eks" { tags = local.tags } + +resource "aws_iam_policy" "additional" { + name = 
"${local.name}-additional" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) +} diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 7369d5235b..5fbadea2e8 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -312,13 +312,11 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags From b1615aea2174ae6be42b82f16d2e5d8f2f632ff7 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 28 Sep 2022 16:40:29 -0400 Subject: [PATCH 14/33] fix: Correct use of iterating over maps with `for_each` and computed values --- README.md | 2 +- docs/faq.md | 35 ------------------------ main.tf | 16 +++++------ modules/eks-managed-node-group/README.md | 11 ++++++++ node_groups.tf | 28 +++++++++++-------- 5 files changed, 35 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index 75b2f5cc24..e5d05664a2 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ module "eks" { vpc_id = "vpc-1234556abcdef" subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] - control_plane_subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] + control_plane_subnet_ids = ["subnet-xyzde987", "subnet-slkjf456", "subnet-qeiru789"] # Self Managed Node Group(s) self_managed_node_group_defaults = { diff --git a/docs/faq.md b/docs/faq.md index 7926cc6d6a..f5be8d56a6 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -1,7 +1,6 @@ # Frequently Asked Questions - [I received an error: `expect exactly one securityGroup tagged with kubernetes.io/cluster/ 
...`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#i-received-an-error-expect-exactly-one-securitygroup-tagged-with-kubernetesioclustername-) -- [I received an error: `Error: Invalid for_each argument ...`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#i-received-an-error-error-invalid-for_each-argument-) - [Why are nodes not being registered?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-nodes-not-being-registered) - [Why are there no changes when a node group's `desired_size` is modified?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-there-no-changes-when-a-node-groups-desired_size-is-modified) - [How can I deploy Windows based nodes?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#how-can-i-deploy-windows-based-nodes) @@ -48,40 +47,6 @@ By default, EKS creates a cluster primary security group that is created outside In theory, if you are attaching the cluster primary security group, you shouldn't need to use the shared node security group created by the module. However, this is left up to users to decide for their requirements and use case. -### I received an error: `Error: Invalid for_each argument ...` - -Users may encounter an error such as `Error: Invalid for_each argument - The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply ...` - -This error is due to an upstream issue with [Terraform core](https://github.com/hashicorp/terraform/issues/4149). There are two potential options you can take to help mitigate this issue: - -1. 
Create the dependent resources before the cluster => `terraform apply -target ` and then `terraform apply` for the cluster (or other similar means to just ensure the referenced resources exist before creating the cluster) - -- Note: this is the route users will have to take for adding additional security groups to nodes since there isn't a separate "security group attachment" resource - -2. For additional IAM policies, users can attach the policies outside of the cluster definition as demonstrated below - -```hcl -resource "aws_iam_role_policy_attachment" "additional" { - for_each = module.eks.eks_managed_node_groups - # you could also do the following or any combination: - # for_each = merge( - # module.eks.eks_managed_node_groups, - # module.eks.self_managed_node_group, - # module.eks.fargate_profile, - # ) - - # This policy does not have to exist at the time of cluster creation. Terraform can - # deduce the proper order of its creation to avoid errors during creation - policy_arn = aws_iam_policy.node_additional.arn - role = each.value.iam_role_name -} -``` - -TL;DR - Terraform resource passed into the modules map definition _must_ be known before you can apply the EKS module. The variables this potentially affects are: - -- `cluster_security_group_additional_rules` (i.e. - referencing an external security group resource in a rule) -- `node_security_group_additional_rules` (i.e. - referencing an external security group resource in a rule) - ### Why are nodes not being registered? Nodes not being able to register with the EKS control plane is generally due to networking mis-configurations. 
diff --git a/main.tf b/main.tf index e0e11cc5e6..988d44473e 100644 --- a/main.tf +++ b/main.tf @@ -201,15 +201,13 @@ resource "aws_security_group_rule" "cluster" { type = each.value.type # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_node_security_group, false) ? local.node_security_group_id : null - ) + description = lookup(each.value, "description", null) + cidr_blocks = lookup(each.value, "cidr_blocks", null) + ipv6_cidr_blocks = lookup(each.value, "ipv6_cidr_blocks", null) + prefix_list_ids = lookup(each.value, "prefix_list_ids", []) + self = lookup(each.value, "self", null) + source_security_group_id = lookup(each.value, "source_security_group_id", + lookup(each.value, "source_node_security_group", false)) ? local.node_security_group_id : null } ################################################################################ diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 13f7eb1146..941b99916d 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -20,6 +20,17 @@ module "eks_managed_node_group" { cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id cluster_security_group_id = module.eks.node_security_group_id + // Note: `disk_size`, and `remote_access` can only be set when using the EKS managed node group default launch template + // This module defaults to providing a custom launch template to allow for custom security groups, tag propagation, etc. 
+ // use_custom_launch_template = false + // disk_size = 50 + // + // # Remote access cannot be specified with a launch template + // remote_access = { + // ec2_ssh_key = module.key_pair.key_pair_name + // source_security_group_ids = [aws_security_group.remote_access.id] + // } + min_size = 1 max_size = 10 desired_size = 1 diff --git a/node_groups.tf b/node_groups.tf index 2e6ec255cf..d8572f575d 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -178,15 +178,13 @@ resource "aws_security_group_rule" "node" { type = each.value.type # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? local.cluster_security_group_id : null - ) + description = lookup(each.value, "description", null) + cidr_blocks = lookup(each.value, "cidr_blocks", null) + ipv6_cidr_blocks = lookup(each.value, "ipv6_cidr_blocks", null) + prefix_list_ids = lookup(each.value, "prefix_list_ids", []) + self = lookup(each.value, "self", null) + source_security_group_id = lookup(each.value, "source_security_group_id", + lookup(each.value, "source_cluster_security_group", false)) ? 
local.cluster_security_group_id : null } ################################################################################ @@ -218,7 +216,9 @@ module "fargate_profile" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.fargate_profile_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.fargate_profile_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.fargate_profile_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, {}) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.fargate_profile_defaults, "iam_role_additional_policies", {})) tags = merge(var.tags, try(each.value.tags, var.fargate_profile_defaults.tags, {})) } @@ -315,7 +315,9 @@ module "eks_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.eks_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.eks_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.eks_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, {}) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.eks_managed_node_group_defaults, "iam_role_additional_policies", {})) # Security 
group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, []))) @@ -439,7 +441,9 @@ module "self_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.self_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.self_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.self_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, {}) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.self_managed_node_group_defaults, "iam_role_additional_policies", {})) # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) From 291e0b79598f6d6ecbb13499322530e77cb47206 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Thu, 29 Sep 2022 10:15:32 -0400 Subject: [PATCH 15/33] chore: Update formatting and example content --- README.md | 2 +- examples/complete/main.tf | 6 +++--- examples/eks_managed_node_group/main.tf | 19 +++++++++++++++---- examples/fargate_profile/main.tf | 2 +- examples/self_managed_node_group/main.tf | 19 +++++++++++++++---- 5 files changed, 35 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index e5d05664a2..4366ba5d51 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ The examples provided under `examples/` provide a comprehensive suite of configu - AWS EKS Cluster Addons - 
AWS EKS Identity Provider Configuration -- AWS EKS on Outposts support +- [AWS EKS on Outposts support](https://aws.amazon.com/blogs/aws/deploy-your-amazon-eks-clusters-locally-on-aws-outposts/) - All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported: - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) diff --git a/examples/complete/main.tf b/examples/complete/main.tf index dbe5da28aa..2bc0b11762 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -65,7 +65,7 @@ module "eks" { enable_kms_key_rotation = true iam_role_additional_policies = { - "additional" = aws_iam_policy.additional.arn + additional = aws_iam_policy.additional.arn } vpc_id = module.vpc.vpc_id @@ -110,7 +110,7 @@ module "eks" { self_managed_node_group_defaults = { vpc_security_group_ids = [aws_security_group.additional.id] iam_role_additional_policies = { - "additional" = aws_iam_policy.additional.arn + additional = aws_iam_policy.additional.arn } } @@ -145,7 +145,7 @@ module "eks" { attach_cluster_primary_security_group = true vpc_security_group_ids = [aws_security_group.additional.id] iam_role_additional_policies = { - "additional" = aws_iam_policy.additional.arn + additional = aws_iam_policy.additional.arn } } diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 8166e9d8ad..a7d9a9f4dc 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -181,17 +181,28 @@ module "eks" { enable_bootstrap_user_data = true # This will get added to the template bootstrap_extra_args = <<-EOT + # The admin host container provides SSH access and runs with "superpowers". + # It is disabled by default, but can be disabled explicitly. 
+ [settings.host-containers.admin] + enabled = false + + # The control host container provides out-of-band access via SSM. + # It is enabled by default, and can be disabled if you do not expect to use SSM. + # This could leave you with no way to access the API and change settings on an existing node! + [settings.host-containers.control] + enabled = true + # extra args added [settings.kernel] lockdown = "integrity" [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" + label1 = "foo" + label2 = "bar" [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + dedicated = "experimental:PreferNoSchedule" + special = "true:NoSchedule" EOT } diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 8c2375efd9..53213b4df4 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -73,7 +73,7 @@ module "eks" { fargate_profile_defaults = { iam_role_additional_policies = { - "additional" = aws_iam_policy.additional.arn + additional = aws_iam_policy.additional.arn } } diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 5fbadea2e8..01eaea44fa 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -125,17 +125,28 @@ module "eks" { key_name = module.key_pair.key_pair_name bootstrap_extra_args = <<-EOT + # The admin host container provides SSH access and runs with "superpowers". + # It is disabled by default, but can be disabled explicitly. + [settings.host-containers.admin] + enabled = false + + # The control host container provides out-of-band access via SSM. + # It is enabled by default, and can be disabled if you do not expect to use SSM. + # This could leave you with no way to access the API and change settings on an existing node! 
+ [settings.host-containers.control] + enabled = true + # extra args added [settings.kernel] lockdown = "integrity" [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" + label1 = "foo" + label2 = "bar" [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + dedicated = "experimental:PreferNoSchedule" + special = "true:NoSchedule" EOT } From 385d75871b8ffc50e32c0ce8a62b106eecc75985 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Thu, 29 Sep 2022 17:23:22 -0400 Subject: [PATCH 16/33] docs: Update upgrade guide for v19.x --- docs/UPGRADE-19.0.md | 436 ++++++++++++++++++++--- examples/complete/main.tf | 21 +- examples/eks_managed_node_group/main.tf | 9 +- examples/fargate_profile/main.tf | 9 +- examples/self_managed_node_group/main.tf | 9 +- main.tf | 10 +- 6 files changed, 432 insertions(+), 62 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index b0de07656b..46f5be344d 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -4,97 +4,443 @@ Please consult the `examples` directory for reference example configurations. If ## List of backwards incompatible changes -- Minimum supported version of Terraform AWS provider updated to v4.7 to support latest features in autoscaling groups -- Individual security group created per EKS managed node group or self managed node group has been removed. This feature was largely un-used, often caused confusion, and can readily be replaced by a user provided security group that was externally created +- Minimum supported version of Terraform AWS provider updated to v4.32 to support latest features provided via the resources utilized. +- Minimum supported version of Terraform updated to v1.0 +- Individual security group created per EKS managed node group or self managed node group has been removed. 
This configuration went mostly un-used and would often cause confusion ("Why is there an empty security group attached to my nodes?"). This functionality can easily be replicated by users providing one or more externally created security groups to attach to nodes launched from the node group. +- Previously, `var.iam_role_additional_policies` (one for each of the following: cluster IAM role, EKS managed node group IAM role, self-managed node group IAM role, and Fargate Profile IAM role) accepted a list of strings. This worked well for policies that already existed but failed for policies being created at the same time as the cluster due to the well known issue of unknown values used in a `for_each` loop. To rectify this issue in `v19.x`, two changes were made: + 1. `var.iam_role_additional_policies` was changed from type `list(string)` to type `map(string)` -> this is a breaking change. More information on managing this change can be found below, under `Terraform State Moves` + 2. The logic used in the root module for this variable was changed to replace the use of `try()` with `lookup()`. More details on why can be found [here](https://github.com/clowdhaus/terraform-for-each-unknown) ## Additional changes ### Added -- +- Support for setting `preserve` as well as `most_recent` on addons. + - `preserve` indicates if you want to preserve the created resources when deleting the EKS add-on + - `most_recent` indicates if you want to use the most recent revision of the add-on or the default version (default) ### Modified -- `block_device_mappings` previously required a map of maps but has since changed to an array of maps. Users can remove the outer key for each block device mapping and replace the outermost map `{}` with an array `[]`.
There are not state changes required for this change +- `cluster_security_group_additional_rules` and `node_security_group_additional_rules` have been modified to use `lookup()` instead of `try()` to avoid the well known issue of [unknown values within a `for_each` loop](https://github.com/hashicorp/terraform/issues/4149) +- `block_device_mappings` previously required a map of maps but has since changed to an array of maps. Users can remove the outer key for each block device mapping and replace the outermost map `{}` with an array `[]`. There are no state changes required for this change. +- `node_security_group_ntp_ipv4_cidr_block` previously defaulted to `["0.0.0.0/0"]` and now defaults to `["169.254.169.123/32"]` (Reference: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) +- `node_security_group_ntp_ipv6_cidr_block` previously defaulted to `["::/0"]` and now defaults to `["fd00:ec2::123/128"]` (Reference: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) +- `create_kms_key` previously defaulted to `false` and now defaults to `true`. Clusters created with this module now default to enabling secret encryption by default with a customer managed KMS key created by this module +- `cluster_encryption_config` previously used a type of `list(any)` and now uses a type of `any` -> users can simply remove the outer `[`...`]` brackets on `v19.x` + - `cluster_encryption_config` previously defaulted to `[]` and now defaults to `{resources = ["secrets"]}` to encrypt secrets by default +- `cluster_endpoint_public_access` previously defaulted to `true` and now defaults to `false`. Clusters created with this module now default to private only access to the cluster endpoint + - `cluster_endpoint_private_access` previously defaulted to `false` and now defaults to `true` +- The addon configuration now sets `"OVERWRITE"` as the default value for `resolve_conflicts` to ease addon upgrade management.
Users can opt out of this by instead setting `"NONE"` as the value for `resolve_conflicts` +- The `kms` module used has been updated from `v1.0.2` to `v1.1.0` - no material changes other than updated to latest ### Removed -- +- N/A ### Variable and output changes 1. Removed variables: - - Self managed node groups: - - `create_security_group` - - `security_group_name` - - `security_group_use_name_prefix` - - `security_group_description` - - `security_group_rules` - - `security_group_tags` - - `cluster_security_group_id` - - `vpc_id` - - EKS managed node groups: - - `create_security_group` - - `security_group_name` - - `security_group_use_name_prefix` - - `security_group_description` - - `security_group_rules` - - `security_group_tags` - - `cluster_security_group_id` - - `vpc_id` + - Self managed node groups: + - `create_security_group` + - `security_group_name` + - `security_group_use_name_prefix` + - `security_group_description` + - `security_group_rules` + - `security_group_tags` + - `cluster_security_group_id` + - `vpc_id` + - EKS managed node groups: + - `create_security_group` + - `security_group_name` + - `security_group_use_name_prefix` + - `security_group_description` + - `security_group_rules` + - `security_group_tags` + - `cluster_security_group_id` + - `vpc_id` 2. Renamed variables: - - + - N/A 3. Added variables: - - Self managed node groups: - - - - EKS managed node groups: - - + - `provision_on_outpost`for Outposts support + - `outpost_config` for Outposts support + - `cluster_addons_timeouts` for setting a common set of timeouts for all addons (unless a specific value is provided within the addon configuration) + + - Self managed node groups: + - N/A + - EKS managed node groups: + - `use_custom_launch_template` was added to better clarify how users can switch betweeen a custom launch template or the default launch template provided by the EKS managed node group. 
Previously, to achieve this same functionality of using the default launch template, users needed to set `create_launch_template = false` and `launch_template_name = ""` which is not very intuitive. 4. Removed outputs: - - Self managed node groups: - - `security_group_arn` - - `security_group_id` - - EKS managed node groups: - - `security_group_arn` - - `security_group_id` + - Self managed node groups: + - `security_group_arn` + - `security_group_id` + - EKS managed node groups: + - `security_group_arn` + - `security_group_id` 5. Renamed outputs: - - + - N/A 6. Added outputs: - - + - N/A ## Upgrade Migrations -### Self Managed Node Groups +1. Before upgrading your module definition to `v19.x`, please see below for both EKS managed node group(s) and self-managed node groups and removing the node group(s) security group prior to upgrading. -#### 1. [v18.x] Remove Security Group Created by Node Group +### Self Managed Node Groups -Self managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the cluster security group and/or the shared node security group). While still on the `v18.x` of your module definition, remove this security group from your node groups. +Self managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the the shared node security group). While still using version `v18.x` of your module definition, remove this security group from your node groups by setting `create_security_group = false`. - If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. 
Once this is in place, you can proceed with the original security group removal. -- For most users, the security group is not used and can be safely removed. However, deployed instances will have the security group attached and require removal of the security group. Because instances are deployed via autoscaling groups, we cannot simply remove the security group from code and have those changes reflected on the instances. Instead, we have to update the code and then force the autoscaling groups to refresh so that new instances are provisioned without the security group attached. You can utilize the `instance_refresh` parameter to force nodes to re-deploy when removing the security group since changes to launch templates automatically trigger an instance refresh. An example configuration is provided below. - - Add the following to either/or `self_managed_node_group_defaults`/`eks_managed_node_group_defaults`: +- For most users, the security group is not used and can be safely removed. However, deployed instances will have the security group attached to nodes and require the security group to be disassociated before the security group can be deleted. Because instances are deployed via autoscaling groups, we cannot simply remove the security group from code and have those changes reflected on the instances. Instead, we have to update the code and then trigger the autoscaling groups to cycle the instances deployed so that new instances are provisioned without the security group attached. You can utilize the `instance_refresh` parameter of Autoscaling groups to force nodes to re-deploy when removing the security group since changes to launch templates automatically trigger an instance refresh. An example configuration is provided below. 
+ - Add the following to either/or `self_managed_node_group_defaults` or the individual self-managed node group definitions: ```hcl create_security_group = false instance_refresh = { strategy = "Rolling" preferences = { - min_healthy_percentage = 100 + min_healthy_percentage = 66 } } ``` - - It is recommended to use the `aws-node-termination-handler` while performing this update. Please refer to the [`irsa-autoscale-refresh` example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/20af82846b4a1f23f3787a8c455f39c0b6164d80/examples/irsa_autoscale_refresh/charts.tf#L86) for usage. This will ensure that pods are safely evicted in a controlled manner to avoid service disruptions. - - The alternative is to manually detach the security groups from instances so that they can be deleted. Note: security groups cannot be deleted if they are still attached to an ENI. +- It is recommended to use the `aws-node-termination-handler` while performing this update. Please refer to the [`irsa-autoscale-refresh` example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/20af82846b4a1f23f3787a8c455f39c0b6164d80/examples/irsa_autoscale_refresh/charts.tf#L86) for usage. This will ensure that pods are safely evicted in a controlled manner to avoid service disruptions. +- Once the necessary configurations are in place, you can apply the changes which will: + 1. Create a new launch template (version) without the self-managed node group security group + 2. Replace instances based on the `instance_refresh` configuration settings + 3. New instances will launch without the self-managed node group security group, and prior instances will be terminated + 4. Once the self-managed node group has cycled, the security group will be deleted #### EKS Managed Node Groups -EKS managed node groups on `v18.x` by default create a security group that does not specify any rules. 
In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the cluster security group and/or the shared node security group). However, unlike self managed node groups, EKS managed node groups by default rollout changes using a [rolling update strategy](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-update-behavior.html) that can be influenced through `update_config`. No additional changes are required for removing the the security group created by node groups. +EKS managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the shared node security group). While still using version `v18.x` of your module definition, remove this security group from your node groups by setting `create_security_group = false`. + +- If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. Once this is in place, you can proceed with the original security group removal. +- EKS managed node groups rollout changes using a [rolling update strategy](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-update-behavior.html) that can be influenced through `update_config`. No additional changes are required for removing the security group created by node groups (unlike self-managed node groups which should utilize the `instance_refresh` setting of Autoscaling groups). +- Once `create_security_group = false` has been set, you can apply the changes which will: + 1. Create a new launch template (version) without the EKS managed node group security group + 2. Replace instances based on the `update_config` configuration settings + 3.
New instances will launch without the EKS managed node group security group, and prior instances will be terminated + 4. Once the EKS managed node group has cycled, the security group will be deleted + +2. Once the node group security group(s) have been removed, you can update your module definition to specify the `v19.x` version of the module. +3. Using the documentation provided above, update your module definition to reflect the changes in the module from `v18.x` to `v19.x`. You can utilize `terraform plan` as you go to help highlight any changes that you wish to make. See below for `terraform state mv ...` commands related to the use of `iam_role_additional_policies`. If you are not providing any values to these variables, you can skip this section. +4. Once you are satisifed with the changes and the `terraform plan` output, you can apply the changes to sync your infrastructure with the updated module definition (or vice versa). + +### Diff of Before (v18.x) vs After (v19.x) + +```diff + module "eks" { + source = "terraform-aws-modules/eks/aws" +- version = "~> 18.0" ++ version = "~> 19.0" + + cluster_name = local.name ++ cluster_endpoint_public_access = true +- cluster_endpoint_private_access = true # now the default + + cluster_addons = { +- resolve_conflicts = "OVERWRITE" # now the default ++ preserve = true ++ most_recent = true + ++ timeouts = { ++ create = "25m" ++ delete = "10m" + } + kube-proxy = {} + vpc-cni = { +- resolve_conflicts = "OVERWRITE" # now the default + } + } + + # Encryption key + create_kms_key = true +- cluster_encryption_config = { +- resources = ["secrets"] +- } ++ cluster_encryption_config = [{ ++ resources = ["secrets"] ++ }] + kms_key_deletion_window_in_days = 7 + enable_kms_key_rotation = true + +- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets 
+ control_plane_subnet_ids = module.vpc.intra_subnets + + # Extend cluster security group rules + cluster_security_group_additional_rules = { + egress_nodes_ephemeral_ports_tcp = { + description = "To node 1025-65535" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "egress" + source_node_security_group = true + } + } + + # Extend node-to-node security group rules +- node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"] # now the default + node_security_group_additional_rules = { + ingress_self_all = { + description = "Node to node all ports/protocols" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "ingress" + self = true + } + egress_all = { + description = "Node all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } + } + + # Self Managed Node Group(s) + self_managed_node_group_defaults = { + vpc_security_group_ids = [aws_security_group.additional.id] +- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + } + + self_managed_node_groups = { + spot = { + instance_type = "m5.large" + instance_market_options = { + market_type = "spot" + } + + pre_bootstrap_user_data = <<-EOT + echo "foo" + export FOO=bar + EOT + + bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" + + post_bootstrap_user_data = <<-EOT + cd /tmp + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + sudo systemctl enable amazon-ssm-agent + sudo systemctl start amazon-ssm-agent + EOT + +- create_security_group = true +- security_group_name = "eks-managed-node-group-complete-example" +- security_group_use_name_prefix = false +- security_group_description = "EKS managed node group complete example security group" +- security_group_rules = {} +- security_group_tags 
= {} + } + } + + # EKS Managed Node Group(s) + eks_managed_node_group_defaults = { + ami_type = "AL2_x86_64" + instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] + + attach_cluster_primary_security_group = true + vpc_security_group_ids = [aws_security_group.additional.id] +- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + } + + eks_managed_node_groups = { + blue = {} + green = { + min_size = 1 + max_size = 10 + desired_size = 1 + + instance_types = ["t3.large"] + capacity_type = "SPOT" + labels = { + Environment = "test" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + + taints = { + dedicated = { + key = "dedicated" + value = "gpuGroup" + effect = "NO_SCHEDULE" + } + } + + update_config = { + max_unavailable_percentage = 33 # or set `max_unavailable` + } + +- create_security_group = true +- security_group_name = "eks-managed-node-group-complete-example" +- security_group_use_name_prefix = false +- security_group_description = "EKS managed node group complete example security group" +- security_group_rules = {} +- security_group_tags = {} + + tags = { + ExtraTag = "example" + } + } + } + + # Fargate Profile(s) + fargate_profile_defaults = { +- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + } + + fargate_profiles = { + default = { + name = "default" + selectors = [ + { + namespace = "kube-system" + labels = { + k8s-app = "kube-dns" + } + }, + { + namespace = "default" + } + ] + + tags = { + Owner = "test" + } + + timeouts = { + create = "20m" + delete = "20m" + } + } + } + + # OIDC Identity provider + cluster_identity_providers = { + sts = { + client_id = "sts.amazonaws.com" + } + } + + # aws-auth configmap + manage_aws_auth_configmap = true + + aws_auth_node_iam_role_arns_non_windows = 
[ + module.eks_managed_node_group.iam_role_arn, + module.self_managed_node_group.iam_role_arn, + ] + aws_auth_fargate_profile_pod_execution_role_arns = [ + module.fargate_profile.fargate_profile_pod_execution_role_arn + ] + + aws_auth_roles = [ + { + rolearn = "arn:aws:iam::66666666666:role/role1" + username = "role1" + groups = ["system:masters"] + }, + ] + + aws_auth_users = [ + { + userarn = "arn:aws:iam::66666666666:user/user1" + username = "user1" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::66666666666:user/user2" + username = "user2" + groups = ["system:masters"] + }, + ] + + aws_auth_accounts = [ + "777777777777", + "888888888888", + ] + + tags = local.tags +} +``` + +## Terraform State Moves + +The following Terraform state move commands are optional but recommended if you are providing additional IAM policies that are to be attached to IAM roles created by this module (cluster IAM role, node group IAM role, Fargate profile IAM role). Because the resources affected are `aws_iam_role_policy_attachment`, in theory you could get away with simply applying the configuration and letting Terraform detach and re-attach the policies. However, during this brief period of update, you could experience permission failures as the policy is detached and re-attached and therefore the state move route is recommended. + +Where `""` is specified, this should be replaced with the full ARN of the policy, and `""` should be replaced with the key used in the `iam_role_additional_policies` map for the associated policy. For example, if you have the following`v19.x` configuration: + +```hcl + ... + # This is demonstrating the cluster IAM role addtional policies + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } + ... 
+``` + +The associated state move command would look similar to (albeit with your correct policy ARN): + +```sh +terraform state mv 'module.eks.aws_iam_role_policy_attachment.this["arn:aws:iam::111111111111:policy/ex-complete-additional"]' 'module.eks.aws_iam_role_policy_attachment.additional["additional"]' +``` + +If you are not providing any additional IAM policies, no actions are required. + +### Cluster IAM Role + +Repeat for each policy provided in `iam_role_additional_policies`: + +```sh +terraform state mv 'module.eks.aws_iam_role_policy_attachment.this[""]' 'module.eks.aws_iam_role_policy_attachment.additional[""]' +``` + +### EKS Managed Node Group IAM Role + +Where `""` is the key used in the `eks_managed_node_groups` map for the associated node group. Repeat for each policy provided in `iam_role_additional_policies` in either/or `eks_managed_node_group_defaults` or the individual node group definitions: + +```sh +terraform state mv 'module.eks.module.eks_managed_node_group[""].aws_iam_role_policy_attachment.this[""]' 'module.eks.module.eks_managed_node_group[""].aws_iam_role_policy_attachment.additional[""]' +``` + +### Self-Managed Node Group IAM Role + +Where `""` is the key used in the `self_managed_node_groups` map for the associated node group. Repeat for each policy provided in `iam_role_additional_policies` in either/or `self_managed_node_group_defaults` or the individual node group definitions: + +```sh +terraform state mv 'module.eks.module.self_managed_node_group[""].aws_iam_role_policy_attachment.this[""]' 'module.eks.module.self_managed_node_group[""].aws_iam_role_policy_attachment.additional[""]' +``` + +### Fargate Profile IAM Role + +Where `""` is the key used in the `fargate_profiles` map for the associated profile. 
Repeat for each policy provided in `iam_role_additional_policies` in either/or `fargate_profile_defaults` or the individual profile definitions: + +```sh +terraform state mv 'module.eks.module.fargate_profile[""].aws_iam_role_policy_attachment.this[""]' 'module.eks.module.fargate_profile[""].aws_iam_role_policy_attachment.additional[""]' +``` diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 2bc0b11762..8d891d463f 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -48,11 +48,19 @@ module "eks" { cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + preserve = true + most_recent = true + + timeouts = { + create = "25m" + delete = "10m" + } + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } @@ -112,6 +120,13 @@ module "eks" { iam_role_additional_policies = { additional = aws_iam_policy.additional.arn } + + instance_refresh = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } } self_managed_node_groups = { diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index a7d9a9f4dc..e492c5eedc 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -56,11 +56,14 @@ module "eks" { cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + preserve = true + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true service_account_role_arn = module.vpc_cni_irsa.iam_role_arn } } diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 53213b4df4..e11b7feca1 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -32,11 +32,14 @@ module "eks" { cluster_addons = { # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns 
coredns = { - resolve_conflicts = "OVERWRITE" + preserve = true + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 01eaea44fa..1131150d41 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -45,11 +45,14 @@ module "eks" { cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + preserve = true + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } diff --git a/main.tf b/main.tf index 988d44473e..3ec3f5f04f 100644 --- a/main.tf +++ b/main.tf @@ -367,13 +367,13 @@ resource "aws_eks_addon" "this" { addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version) preserve = try(each.value.preserve, null) - resolve_conflicts = try(each.value.resolve_conflicts, null) + resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") service_account_role_arn = try(each.value.service_account_role_arn, null) timeouts { - create = try(var.cluster_addons_timeouts.create, null) - update = try(var.cluster_addons_timeouts.update, null) - delete = try(var.cluster_addons_timeouts.delete, null) + create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null) + update = try(each.value.timeouts.update, var.cluster_addons_timeouts.update, null) + delete = try(each.value.timeouts.delete, var.cluster_addons_timeouts.delete, null) } depends_on = [ @@ -389,7 +389,7 @@ data "aws_eks_addon_version" "this" { for_each = { for k, v in var.cluster_addons : k => v if local.create } addon_name = try(each.value.name, each.key) - kubernetes_version = aws_eks_cluster.this[0].version + kubernetes_version = coalesce(var.cluster_version, aws_eks_cluster.this[0].version) most_recent = 
try(each.value.most_recent, null) } From ed0c336d1c52c757800c96bb9a88e2fe5f358558 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 18 Oct 2022 10:13:14 -0400 Subject: [PATCH 17/33] fix: Remove all references of `aws_default_tags` to avoid update conflicts; this is the responsibility of the provider --- README.md | 1 - main.tf | 4 +--- modules/self-managed-node-group/README.md | 2 -- modules/self-managed-node-group/main.tf | 3 +-- modules/self-managed-node-group/variables.tf | 6 ------ 5 files changed, 2 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 4366ba5d51..1d339d8abd 100644 --- a/README.md +++ b/README.md @@ -263,7 +263,6 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | | [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_default_tags.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | | [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | diff --git a/main.tf b/main.tf index 3d82afef15..311d0b8239 100644 --- a/main.tf +++ b/main.tf @@ -1,6 +1,5 @@ data "aws_partition" "current" {} data "aws_caller_identity" "current" {} -data 
"aws_default_tags" "current" {} locals { create = var.create && var.putin_khuylo @@ -82,8 +81,7 @@ resource "aws_ec2_tag" "cluster_primary_security_group" { # This should not affect the name of the cluster primary security group # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 - # `aws_default_tags` is merged in to "dedupe" tags and stabilize tag updates - for_each = { for k, v in merge(var.tags, var.cluster_tags, data.aws_default_tags.current.tags) : + for_each = { for k, v in merge(var.tags, var.cluster_tags) : k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags } diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 0a3d1778aa..6034b93c2e 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -70,7 +70,6 @@ module "self_managed_node_group" { | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_default_tags.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | @@ -159,7 +158,6 @@ module "self_managed_node_group" { | [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with 
Application or Network Load Balancing | `list(string)` | `[]` | no | | [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `[]` | no | | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update Default Version each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no | -| [use\_default\_tags](#input\_use\_default\_tags) | Enables/disables the use of provider default tags in the tag\_specifications of the Auto Scaling group | `bool` | `false` | no | | [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index ecfc29123a..a1d9eaf675 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -1,6 +1,5 @@ data "aws_partition" "current" {} data "aws_caller_identity" "current" {} -data "aws_default_tags" "current" {} data "aws_ami" "eks_default" { count = var.create ? 1 : 0 @@ -386,7 +385,7 @@ resource "aws_autoscaling_group" "this" { "kubernetes.io/cluster/${var.cluster_name}" = "owned" "k8s.io/cluster/${var.cluster_name}" = "owned" }, - var.use_default_tags ? 
merge(data.aws_default_tags.current.tags, var.tags) : var.tags + var.tags ) content { diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index eb7ec029fe..71cbdb7ab0 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -458,12 +458,6 @@ variable "delete_timeout" { default = null } -variable "use_default_tags" { - description = "Enables/disables the use of provider default tags in the tag_specifications of the Auto Scaling group" - type = bool - default = false -} - variable "autoscaling_group_tags" { description = "A map of additional tags to add to the autoscaling group created. Tags are applied to the autoscaling group only and are NOT propagated to instances" type = map(string) From 4f651e1a388e260e3d3138c11d2b83c54518b2eb Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 18 Oct 2022 10:20:21 -0400 Subject: [PATCH 18/33] feat: Add support for `service_ipv6_cidr` within the EKS cluster network settings --- README.md | 5 +++-- main.tf | 1 + modules/eks-managed-node-group/README.md | 4 ++-- modules/eks-managed-node-group/versions.tf | 2 +- modules/fargate-profile/README.md | 4 ++-- modules/fargate-profile/versions.tf | 2 +- modules/self-managed-node-group/README.md | 4 ++-- modules/self-managed-node-group/versions.tf | 2 +- variables.tf | 6 ++++++ versions.tf | 2 +- 10 files changed, 20 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 1d339d8abd..debbc40453 100644 --- a/README.md +++ b/README.md @@ -219,7 +219,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -227,7 +227,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | | [kubernetes](#provider\_kubernetes) | >= 2.10 | | [tls](#provider\_tls) | >= 3.0 | @@ -306,6 +306,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no | | [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `bool` | `true` | no | | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | +| [cluster\_service\_ipv6\_cidr](#input\_cluster\_service\_ipv6\_cidr) | The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. 
Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster | `string` | `null` | no | | [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no | | [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.23`) | `string` | `null` | no | diff --git a/main.tf b/main.tf index 311d0b8239..b1966df153 100644 --- a/main.tf +++ b/main.tf @@ -34,6 +34,7 @@ resource "aws_eks_cluster" "this" { content { ip_family = var.cluster_ip_family service_ipv4_cidr = var.cluster_service_ipv4_cidr + service_ipv6_cidr = var.cluster_service_ipv6_cidr } } diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 941b99916d..847c5da00b 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -65,13 +65,13 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | ## Modules diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf index b12a7dd00b..5f058b4c11 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index e0dba7cb79..23aa059b96 100644 --- 
a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -29,13 +29,13 @@ module "fargate_profile" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | ## Modules diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index b12a7dd00b..5f058b4c11 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } } } diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 6034b93c2e..a945de76bd 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -43,13 +43,13 @@ module "self_managed_node_group" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | ## Modules diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index b12a7dd00b..5f058b4c11 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } } } diff --git a/variables.tf b/variables.tf index 555f07bec3..75a8eaa516 100644 --- a/variables.tf +++ b/variables.tf @@ -86,6 +86,12 @@ variable "cluster_service_ipv4_cidr" { default = null } +variable "cluster_service_ipv6_cidr" { + description = "The CIDR block to assign 
Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster" + type = string + default = null +} + variable "provision_on_outpost" { description = "Determines whether cluster should be provisioned on an AWS Outpost" type = bool diff --git a/versions.tf b/versions.tf index 02eb2b4080..b63c1c5946 100644 --- a/versions.tf +++ b/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } tls = { source = "hashicorp/tls" From 07385695c9d43804f35357eabd2a9b41a7977bca Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 18 Oct 2022 10:32:25 -0400 Subject: [PATCH 19/33] chore: Update upgrade documentation and fix CI checks --- docs/UPGRADE-19.0.md | 7 +++++-- examples/complete/main.tf | 2 -- node_groups.tf | 1 - 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 46f5be344d..f517146d29 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -4,7 +4,7 @@ Please consult the `examples` directory for reference example configurations. If ## List of backwards incompatible changes -- Minimum supported version of Terraform AWS provider updated to v4.32 to support latest features provided via the resources utilized. +- Minimum supported version of Terraform AWS provider updated to v4.34 to support latest features provided via the resources utilized. - Minimum supported version of Terraform updated to v1.0 - Individual security group created per EKS managed node group or self managed node group has been removed. This configuration went mostly un-used and would often cause confusion ("Why is there an empty security group attached to my nodes?"). 
This functionality can easily be replicated by users providing one or more externally created security groups to attach to nodes launched from the node group. - Previously, `var.iam_role_additional_policies` (one for each of the following: cluster IAM role, EKS managed node group IAM role, self-managed node group IAM role, and Fargate Profile IAM role) accepted a list of strings. This worked well for policies that already existed but failed for policies being created at the same time as the cluster due to the well known issue of unknown values used in a `for_each` loop. To rectify this issue in `v19.x`, two changes were made: @@ -35,7 +35,9 @@ Please consult the `examples` directory for reference example configurations. If ### Removed -- N/A +- Remove all references of `aws_default_tags` to avoid update conflicts; this is the responsibility of the provider and should be handled at the provider level + - https://github.com/terraform-aws-modules/terraform-aws-eks/issues?q=is%3Aissue+default_tags+is%3Aclosed + - https://github.com/terraform-aws-modules/terraform-aws-eks/pulls?q=is%3Apr+default_tags+is%3Aclosed ### Variable and output changes @@ -69,6 +71,7 @@ Please consult the `examples` directory for reference example configurations. 
If - `provision_on_outpost`for Outposts support - `outpost_config` for Outposts support - `cluster_addons_timeouts` for setting a common set of timeouts for all addons (unless a specific value is provided within the addon configuration) + - `service_ipv6_cidr` for setting the IPv6 CIDR block for the Kubernetes service addresses - Self managed node groups: - N/A diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 8d891d463f..46e2ee26dc 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -308,8 +308,6 @@ module "self_managed_node_group" { module.eks.cluster_security_group_id, ] - use_default_tags = true - tags = merge(local.tags, { Separate = "self-managed-node-group" }) } diff --git a/node_groups.tf b/node_groups.tf index d8572f575d..e2782b3b81 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -383,7 +383,6 @@ module "self_managed_node_group" { schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null) - use_default_tags = try(each.value.use_default_tags, var.self_managed_node_group_defaults.use_default_tags, false) autoscaling_group_tags = try(each.value.autoscaling_group_tags, var.self_managed_node_group_defaults.autoscaling_group_tags, {}) # User data From 87ced51b7e5a222b201d18f1005c9632d8ae6997 Mon Sep 17 00:00:00 2001 From: Anton Babenko Date: Tue, 25 Oct 2022 13:51:07 +0200 Subject: [PATCH 20/33] Fixed IAM policy cluster_encryption when create_kms_key is false --- main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.tf b/main.tf index b1966df153..51f9d91bf2 100644 --- a/main.tf +++ b/main.tf @@ -347,7 +347,7 @@ resource "aws_iam_policy" "cluster_encryption" { "kms:DescribeKey", ] Effect = "Allow" - Resource = var.create_kms_key ? 
[module.kms.key_arn] : [for config in var.cluster_encryption_config : config.provider_key_arn] + Resource = var.create_kms_key ? module.kms.key_arn : var.cluster_encryption_config.provider_key_arn }, ] }) From 5fa0e3389924be5cf1e3c5f3108b2ec9b0a089ca Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 25 Oct 2022 12:16:12 -0400 Subject: [PATCH 21/33] chore: Updates from PR review feedback --- docs/UPGRADE-19.0.md | 8 ++++---- main.tf | 8 ++++---- modules/eks-managed-node-group/main.tf | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index f517146d29..2777667132 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -167,12 +167,12 @@ EKS managed node groups on `v18.x` by default create a security group that does # Encryption key create_kms_key = true -- cluster_encryption_config = { +- cluster_encryption_config = [{ - resources = ["secrets"] -- } -+ cluster_encryption_config = [{ +- }] ++ cluster_encryption_config = { + resources = ["secrets"] -+ }] ++ } kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true diff --git a/main.tf b/main.tf index 51f9d91bf2..eba77b9748 100644 --- a/main.tf +++ b/main.tf @@ -304,10 +304,10 @@ resource "aws_iam_role" "this" { # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in toset(compact([ - "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", - "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", - ])) : k => v if local.create_iam_role } + for_each = { for k, v in { + AmazonEKSClusterPolicy = "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", + AmazonEKSVPCResourceController = "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", + } : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/eks-managed-node-group/main.tf 
b/modules/eks-managed-node-group/main.tf index 26fd4b2d09..4f51107de0 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -283,7 +283,7 @@ resource "aws_eks_node_group" "this" { version = var.ami_id != "" ? null : var.cluster_version capacity_type = var.capacity_type - disk_size = var.use_custom_launch_template ? null : var.disk_size # if using LT, set disk size on LT or else it will error here + disk_size = var.use_custom_launch_template ? null : var.disk_size # if using a custom LT, set disk size on custom LT or else it will error here force_update_version = var.force_update_version instance_types = var.instance_types labels = var.labels From e782ce5d8f03f05345171eacd8a2934146af4f22 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 25 Oct 2022 14:54:20 -0400 Subject: [PATCH 22/33] docs: Fix v18 diff format for additional IAM policies --- docs/UPGRADE-19.0.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 2777667132..9f23407f72 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -176,7 +176,7 @@ EKS managed node groups on `v18.x` by default create a security group that does kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true -- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] +- iam_role_additional_policies = [aws_iam_policy.additional.arn] + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } @@ -222,7 +222,7 @@ EKS managed node groups on `v18.x` by default create a security group that does # Self Managed Node Group(s) self_managed_node_group_defaults = { vpc_security_group_ids = [aws_security_group.additional.id] -- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] +- iam_role_additional_policies = [aws_iam_policy.additional.arn] + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } @@ -265,7 
+265,7 @@ EKS managed node groups on `v18.x` by default create a security group that does attach_cluster_primary_security_group = true vpc_security_group_ids = [aws_security_group.additional.id] -- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] +- iam_role_additional_policies = [aws_iam_policy.additional.arn] + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } @@ -313,7 +313,7 @@ EKS managed node groups on `v18.x` by default create a security group that does # Fargate Profile(s) fargate_profile_defaults = { -- iam_role_additional_policies = [additional = aws_iam_policy.additional.arn] +- iam_role_additional_policies = [aws_iam_policy.additional.arn] + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } From cea7595042fa21a23db89971082e8c965866e0c8 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 26 Oct 2022 14:40:28 -0400 Subject: [PATCH 23/33] feat: Update `self-managed-node-group` to use latest settings provided by AWS provider --- docs/UPGRADE-19.0.md | 8 +- modules/self-managed-node-group/README.md | 13 +- modules/self-managed-node-group/main.tf | 564 ++++++++++++++----- modules/self-managed-node-group/variables.tf | 50 +- node_groups.tf | 39 +- 5 files changed, 497 insertions(+), 177 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 9f23407f72..20751a3ee3 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -74,7 +74,13 @@ Please consult the `examples` directory for reference example configurations. 
If - `service_ipv6_cidr` for setting the IPv6 CIDR block for the Kubernetes service addresses - Self managed node groups: - - N/A + - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204) + - `maintenance_options` + - `private_dns_name_options` + - `instance_requirements` + - `context` + - `default_instance_warmup` + - `force_delete_warm_pool` - EKS managed node groups: - `use_custom_launch_template` was added to better clarify how users can switch between a custom launch template and the default launch template provided by the EKS managed node group. Previously, to achieve this same functionality of using the default launch template, users needed to set `create_launch_template = false` and `launch_template_name = ""` which is not very intuitive. diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index a945de76bd..411342e498 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -90,6 +90,7 @@ module "self_managed_node_group" { | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `""` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). 
This is the security group that is automatically created by the EKS service | `string` | `null` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no | +| [context](#input\_context) | Reserved | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no | | [create\_autoscaling\_group](#input\_create\_autoscaling\_group) | Determines whether to create autoscaling group or not | `bool` | `true` | no | @@ -98,16 +99,18 @@ module "self_managed_node_group" { | [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no | | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no | +| [default\_instance\_warmup](#input\_default\_instance\_warmup) | Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. 
This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data | `number` | `null` | no | | [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no | | [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no | | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no | | [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no | -| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `{}` | no | +| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `any` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | | [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. 
The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `[]` | no | | [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `{}` | no | | [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no | +| [force\_delete\_warm\_pool](#input\_force\_delete\_warm\_pool) | Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate | `bool` | `null` | no | | [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no | | [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no | | [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `{}` | no | @@ -124,16 +127,19 @@ module "self_managed_node_group" { | [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. 
(Default: `stop`) | `string` | `null` | no | | [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `{}` | no | | [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | `{}` | no | +| [instance\_requirements](#input\_instance\_requirements) | The attribute requirements for the type of instance. If present then `instance_type` cannot be present | `any` | `{}` | no | | [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no | | [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no | | [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no | | [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default Version of the launch template | `string` | `null` | no | | [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no | -| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no | +| [launch\_template\_id](#input\_launch\_template\_id) | The ID of an existing launch template to use. 
Required when `create_launch_template` = `false` | `string` | `""` | no | +| [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `null` | no | | [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no | | [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no | | [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no | -| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `{}` | no | +| [license\_specifications](#input\_license\_specifications) | A map of license specifications to associate with | `any` | `{}` | no | +| [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | `any` | `{}` | no | | [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds | `number` | `null` | no | | [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `3` | no | | [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |
{
"http_endpoint": "enabled",
"http_put_response_hop_limit": 2,
"http_tokens": "required"
}
| no | @@ -148,6 +154,7 @@ module "self_managed_node_group" { | [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no | | [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | | [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | +| [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | `map(string)` | `{}` | no | | [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. 
| `bool` | `false` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | | [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index a1d9eaf675..f938743903 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -40,64 +40,51 @@ module "user_data" { ################################################################################ locals { - launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-node-group") - security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + launch_template_name = coalesce(var.launch_template_name, "${var.name}-node-group") + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { count = var.create && var.create_launch_template ? 1 : 0 - name = var.launch_template_use_name_prefix ? null : local.launch_template_name_int - name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name_int}-" : null - description = var.launch_template_description - - ebs_optimized = var.ebs_optimized - image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id) - instance_type = var.instance_type - key_name = var.key_name - user_data = module.user_data.user_data - - vpc_security_group_ids = length(var.network_interfaces) > 0 ? 
[] : local.security_group_ids - - default_version = var.launch_template_default_version - update_default_version = var.update_launch_template_default_version - disable_api_termination = var.disable_api_termination - instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior - kernel_id = var.kernel_id - ram_disk_id = var.ram_disk_id - dynamic "block_device_mappings" { for_each = var.block_device_mappings + content { - device_name = block_device_mappings.value.device_name - no_device = lookup(block_device_mappings.value, "no_device", null) - virtual_name = lookup(block_device_mappings.value, "virtual_name", null) + device_name = try(block_device_mappings.value.device_name, null) dynamic "ebs" { - for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) + for_each = try([block_device_mappings.value.ebs], []) + content { - delete_on_termination = lookup(ebs.value, "delete_on_termination", null) - encrypted = lookup(ebs.value, "encrypted", null) - kms_key_id = lookup(ebs.value, "kms_key_id", null) - iops = lookup(ebs.value, "iops", null) - throughput = lookup(ebs.value, "throughput", null) - snapshot_id = lookup(ebs.value, "snapshot_id", null) - volume_size = lookup(ebs.value, "volume_size", null) - volume_type = lookup(ebs.value, "volume_type", null) + delete_on_termination = try(ebs.value.delete_on_termination, null) + encrypted = try(ebs.value.encrypted, null) + iops = try(ebs.value.iops, null) + kms_key_id = try(ebs.value.kms_key_id, null) + snapshot_id = try(ebs.value.snapshot_id, null) + throughput = try(ebs.value.throughput, null) + volume_size = try(ebs.value.volume_size, null) + volume_type = try(ebs.value.volume_type, null) } } + + no_device = try(block_device_mappings.value.no_device, null) + virtual_name = try(block_device_mappings.value.virtual_name, null) } } dynamic "capacity_reservation_specification" { for_each = length(var.capacity_reservation_specification) > 0 ? 
[var.capacity_reservation_specification] : [] + content { - capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) + capacity_reservation_preference = try(capacity_reservation_specification.value.capacity_reservation_preference, null) dynamic "capacity_reservation_target" { for_each = try([capacity_reservation_specification.value.capacity_reservation_target], []) + content { - capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) + capacity_reservation_id = try(capacity_reservation_target.value.capacity_reservation_id, null) + capacity_reservation_resource_group_arn = try(capacity_reservation_target.value.capacity_reservation_resource_group_arn, null) } } } @@ -105,21 +92,29 @@ resource "aws_launch_template" "this" { dynamic "cpu_options" { for_each = length(var.cpu_options) > 0 ? [var.cpu_options] : [] + content { - core_count = cpu_options.value.core_count - threads_per_core = cpu_options.value.threads_per_core + core_count = try(cpu_options.value.core_count, null) + threads_per_core = try(cpu_options.value.threads_per_core, null) } } dynamic "credit_specification" { for_each = length(var.credit_specification) > 0 ? [var.credit_specification] : [] + content { - cpu_credits = credit_specification.value.cpu_credits + cpu_credits = try(credit_specification.value.cpu_credits, null) } } + default_version = var.launch_template_default_version + description = var.launch_template_description + disable_api_termination = var.disable_api_termination + ebs_optimized = var.ebs_optimized + dynamic "elastic_gpu_specifications" { - for_each = length(var.elastic_gpu_specifications) > 0 ? 
[var.elastic_gpu_specifications] : [] + for_each = var.elastic_gpu_specifications + content { type = elastic_gpu_specifications.value.type } @@ -127,6 +122,7 @@ resource "aws_launch_template" "this" { dynamic "elastic_inference_accelerator" { for_each = length(var.elastic_inference_accelerator) > 0 ? [var.elastic_inference_accelerator] : [] + content { type = elastic_inference_accelerator.value.type } @@ -134,6 +130,7 @@ resource "aws_launch_template" "this" { dynamic "enclave_options" { for_each = length(var.enclave_options) > 0 ? [var.enclave_options] : [] + content { enabled = enclave_options.value.enabled } @@ -141,6 +138,7 @@ resource "aws_launch_template" "this" { dynamic "hibernation_options" { for_each = length(var.hibernation_options) > 0 ? [var.hibernation_options] : [] + content { configured = hibernation_options.value.configured } @@ -150,93 +148,235 @@ resource "aws_launch_template" "this" { arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn } + image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id) + instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior + dynamic "instance_market_options" { for_each = length(var.instance_market_options) > 0 ? [var.instance_market_options] : [] + content { - market_type = instance_market_options.value.market_type + market_type = try(instance_market_options.value.market_type, null) dynamic "spot_options" { - for_each = length(lookup(instance_market_options.value, "spot_options", {})) > 0 ? 
[instance_market_options.value.spot_options] : [] + for_each = try([instance_market_options.value.spot_options], []) + content { - block_duration_minutes = lookup(spot_options.value, "block_duration_minutes", null) - instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null) - max_price = lookup(spot_options.value, "max_price", null) - spot_instance_type = lookup(spot_options.value, "spot_instance_type", null) - valid_until = lookup(spot_options.value, "valid_until", null) + block_duration_minutes = try(spot_options.value.block_duration_minutes, null) + instance_interruption_behavior = try(spot_options.value.instance_interruption_behavior, null) + max_price = try(spot_options.value.max_price, null) + spot_instance_type = try(spot_options.value.spot_instance_type, null) + valid_until = try(spot_options.value.valid_until, null) } } } } + dynamic "instance_requirements" { + for_each = length(var.instance_requirements) > 0 ? [var.instance_requirements] : [] + + content { + + dynamic "accelerator_count" { + for_each = try([instance_requirements.value.accelerator_count], []) + + content { + max = try(accelerator_count.value.max, null) + min = try(accelerator_count.value.min, null) + } + } + + accelerator_manufacturers = try(instance_requirements.value.accelerator_manufacturers, []) + accelerator_names = try(instance_requirements.value.accelerator_names, []) + + dynamic "accelerator_total_memory_mib" { + for_each = try([instance_requirements.value.accelerator_total_memory_mib], []) + + content { + max = try(accelerator_total_memory_mib.value.max, null) + min = try(accelerator_total_memory_mib.value.min, null) + } + } + + accelerator_types = try(instance_requirements.value.accelerator_types, []) + bare_metal = try(instance_requirements.value.bare_metal, null) + + dynamic "baseline_ebs_bandwidth_mbps" { + for_each = try([instance_requirements.value.baseline_ebs_bandwidth_mbps], []) + + content { + max = 
try(baseline_ebs_bandwidth_mbps.value.max, null) + min = try(baseline_ebs_bandwidth_mbps.value.min, null) + } + } + + burstable_performance = try(instance_requirements.value.burstable_performance, null) + cpu_manufacturers = try(instance_requirements.value.cpu_manufacturers, []) + excluded_instance_types = try(instance_requirements.value.excluded_instance_types, []) + instance_generations = try(instance_requirements.value.instance_generations, []) + local_storage = try(instance_requirements.value.local_storage, null) + local_storage_types = try(instance_requirements.value.local_storage_types, []) + + dynamic "memory_gib_per_vcpu" { + for_each = try([instance_requirements.value.memory_gib_per_vcpu], []) + + content { + max = try(memory_gib_per_vcpu.value.max, null) + min = try(memory_gib_per_vcpu.value.min, null) + } + } + + dynamic "memory_mib" { + for_each = [instance_requirements.value.memory_mib] + + content { + max = try(memory_mib.value.max, null) + min = memory_mib.value.min + } + } + + dynamic "network_interface_count" { + for_each = try([instance_requirements.value.network_interface_count], []) + + content { + max = try(network_interface_count.value.max, null) + min = try(network_interface_count.value.min, null) + } + } + + on_demand_max_price_percentage_over_lowest_price = try(instance_requirements.value.on_demand_max_price_percentage_over_lowest_price, null) + require_hibernate_support = try(instance_requirements.value.require_hibernate_support, null) + spot_max_price_percentage_over_lowest_price = try(instance_requirements.value.spot_max_price_percentage_over_lowest_price, null) + + dynamic "total_local_storage_gb" { + for_each = try([instance_requirements.value.total_local_storage_gb], []) + + content { + max = try(total_local_storage_gb.value.max, null) + min = try(total_local_storage_gb.value.min, null) + } + } + + dynamic "vcpu_count" { + for_each = [instance_requirements.value.vcpu_count] + + content { + max = try(vcpu_count.value.max, null) + min = 
vcpu_count.value.min + } + } + } + } + + instance_type = var.instance_type + kernel_id = var.kernel_id + key_name = var.key_name + dynamic "license_specification" { - for_each = length(var.license_specifications) > 0 ? [var.license_specifications] : [] + for_each = length(var.license_specifications) > 0 ? var.license_specifications : {} + content { license_configuration_arn = license_specifications.value.license_configuration_arn } } + dynamic "maintenance_options" { + for_each = length(var.maintenance_options) > 0 ? [var.maintenance_options] : [] + + content { + auto_recovery = try(maintenance_options.value.auto_recovery, null) + } + } + dynamic "metadata_options" { for_each = length(var.metadata_options) > 0 ? [var.metadata_options] : [] + content { - http_endpoint = lookup(metadata_options.value, "http_endpoint", null) - http_tokens = lookup(metadata_options.value, "http_tokens", null) - http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) - http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) - instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) + http_endpoint = try(metadata_options.value.http_endpoint, null) + http_protocol_ipv6 = try(metadata_options.value.http_protocol_ipv6, null) + http_put_response_hop_limit = try(metadata_options.value.http_put_response_hop_limit, null) + http_tokens = try(metadata_options.value.http_tokens, null) + instance_metadata_tags = try(metadata_options.value.instance_metadata_tags, null) } } dynamic "monitoring" { - for_each = var.enable_monitoring != null ? [1] : [] + for_each = var.enable_monitoring ? [1] : [] + content { enabled = var.enable_monitoring } } + name = var.launch_template_use_name_prefix ? null : local.launch_template_name + name_prefix = var.launch_template_use_name_prefix ? 
"${local.launch_template_name}-" : null + dynamic "network_interfaces" { for_each = var.network_interfaces content { - associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) - associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) - delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) - description = lookup(network_interfaces.value, "description", null) - device_index = lookup(network_interfaces.value, "device_index", null) - interface_type = lookup(network_interfaces.value, "interface_type", null) + associate_carrier_ip_address = try(network_interfaces.value.associate_carrier_ip_address, null) + associate_public_ip_address = try(network_interfaces.value.associate_public_ip_address, null) + delete_on_termination = try(network_interfaces.value.delete_on_termination, null) + description = try(network_interfaces.value.description, null) + device_index = try(network_interfaces.value.device_index, null) + interface_type = try(network_interfaces.value.interface_type, null) + ipv4_address_count = try(network_interfaces.value.ipv4_address_count, null) ipv4_addresses = try(network_interfaces.value.ipv4_addresses, []) - ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) + ipv4_prefix_count = try(network_interfaces.value.ipv4_prefix_count, null) + ipv4_prefixes = try(network_interfaces.value.ipv4_prefixes, null) + ipv6_address_count = try(network_interfaces.value.ipv6_address_count, null) ipv6_addresses = try(network_interfaces.value.ipv6_addresses, []) - ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) - network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) - private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) - security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) - 
subnet_id = lookup(network_interfaces.value, "subnet_id", null) + ipv6_prefix_count = try(network_interfaces.value.ipv6_prefix_count, null) + ipv6_prefixes = try(network_interfaces.value.ipv6_prefixes, []) + network_card_index = try(network_interfaces.value.network_card_index, null) + network_interface_id = try(network_interfaces.value.network_interface_id, null) + private_ip_address = try(network_interfaces.value.private_ip_address, null) + # Ref: https://github.com/hashicorp/terraform-provider-aws/issues/4570 + security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) + subnet_id = try(network_interfaces.value.subnet_id, null) } } dynamic "placement" { for_each = length(var.placement) > 0 ? [var.placement] : [] + content { - affinity = lookup(placement.value, "affinity", null) - availability_zone = lookup(placement.value, "availability_zone", null) - group_name = lookup(placement.value, "group_name", null) - host_id = lookup(placement.value, "host_id", null) - spread_domain = lookup(placement.value, "spread_domain", null) - tenancy = lookup(placement.value, "tenancy", null) - partition_number = lookup(placement.value, "partition_number", null) + affinity = try(placement.value.affinity, null) + availability_zone = try(placement.value.availability_zone, null) + group_name = try(placement.value.group_name, null) + host_id = try(placement.value.host_id, null) + host_resource_group_arn = try(placement.value.host_resource_group_arn, null) + partition_number = try(placement.value.partition_number, null) + spread_domain = try(placement.value.spread_domain, null) + tenancy = try(placement.value.tenancy, null) } } + dynamic "private_dns_name_options" { + for_each = length(var.private_dns_name_options) > 0 ? 
[var.private_dns_name_options] : [] + + content { + enable_resource_name_dns_aaaa_record = try(private_dns_name_options.value.enable_resource_name_dns_aaaa_record, null) + enable_resource_name_dns_a_record = try(private_dns_name_options.value.enable_resource_name_dns_a_record, null) + hostname_type = try(private_dns_name_options.value.hostname_type, null) + } + } + + ram_disk_id = var.ram_disk_id + dynamic "tag_specifications" { for_each = toset(["instance", "volume", "network-interface"]) + content { resource_type = tag_specifications.key tags = merge(var.tags, { Name = var.name }, var.launch_template_tags) } } - lifecycle { - create_before_destroy = true - } + update_default_version = var.update_launch_template_default_version + user_data = module.user_data.user_data + vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids + + tags = var.tags # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes @@ -244,7 +384,9 @@ resource "aws_launch_template" "this" { aws_iam_role_policy_attachment.this, ] - tags = var.tags + lifecycle { + create_before_destroy = true + } } ################################################################################ @@ -252,7 +394,7 @@ resource "aws_launch_template" "this" { ################################################################################ locals { - launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name) + launch_template_id = var.create_launch_template ? aws_launch_template.this[0].id : var.launch_template_id # Change order to allow users to set version priority before using defaults launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default")) } @@ -260,123 +402,215 @@ locals { resource "aws_autoscaling_group" "this" { count = var.create && var.create_autoscaling_group ? 1 : 0 - name = var.use_name_prefix ? 
null : var.name - name_prefix = var.use_name_prefix ? "${var.name}-" : null - - dynamic "launch_template" { - for_each = var.use_mixed_instances_policy ? [] : [1] - - content { - name = local.launch_template_name - version = local.launch_template_version - } - } - - availability_zones = var.availability_zones - vpc_zone_identifier = var.subnet_ids - - min_size = var.min_size - max_size = var.max_size - desired_capacity = var.desired_size + availability_zones = var.availability_zones capacity_rebalance = var.capacity_rebalance - min_elb_capacity = var.min_elb_capacity - wait_for_elb_capacity = var.wait_for_elb_capacity - wait_for_capacity_timeout = var.wait_for_capacity_timeout + context = var.context default_cooldown = var.default_cooldown - protect_from_scale_in = var.protect_from_scale_in - - target_group_arns = var.target_group_arns - placement_group = var.placement_group - health_check_type = var.health_check_type + default_instance_warmup = var.default_instance_warmup + desired_capacity = var.desired_size + enabled_metrics = var.enabled_metrics + force_delete = var.force_delete + force_delete_warm_pool = var.force_delete_warm_pool health_check_grace_period = var.health_check_grace_period - - force_delete = var.force_delete - termination_policies = var.termination_policies - suspended_processes = var.suspended_processes - max_instance_lifetime = var.max_instance_lifetime - - enabled_metrics = var.enabled_metrics - metrics_granularity = var.metrics_granularity - service_linked_role_arn = var.service_linked_role_arn + health_check_type = var.health_check_type dynamic "initial_lifecycle_hook" { for_each = var.initial_lifecycle_hooks + content { - name = initial_lifecycle_hook.value.name - default_result = lookup(initial_lifecycle_hook.value, "default_result", null) - heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null) + default_result = try(initial_lifecycle_hook.value.default_result, null) + heartbeat_timeout = 
try(initial_lifecycle_hook.value.heartbeat_timeout, null) lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition - notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null) - notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null) - role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null) + name = initial_lifecycle_hook.value.name + notification_metadata = try(initial_lifecycle_hook.value.notification_metadata, null) + notification_target_arn = try(initial_lifecycle_hook.value.notification_target_arn, null) + role_arn = try(initial_lifecycle_hook.value.role_arn, null) } } dynamic "instance_refresh" { for_each = length(var.instance_refresh) > 0 ? [var.instance_refresh] : [] - content { - strategy = instance_refresh.value.strategy - triggers = lookup(instance_refresh.value, "triggers", null) + content { dynamic "preferences" { - for_each = length(lookup(instance_refresh.value, "preferences", {})) > 0 ? [instance_refresh.value.preferences] : [] + for_each = try([instance_refresh.value.preferences], []) + content { - instance_warmup = lookup(preferences.value, "instance_warmup", null) - min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null) - checkpoint_delay = lookup(preferences.value, "checkpoint_delay", null) - checkpoint_percentages = lookup(preferences.value, "checkpoint_percentages", null) + checkpoint_delay = try(preferences.value.checkpoint_delay, null) + checkpoint_percentages = try(preferences.value.checkpoint_percentages, null) + instance_warmup = try(preferences.value.instance_warmup, null) + min_healthy_percentage = try(preferences.value.min_healthy_percentage, null) + skip_matching = try(preferences.value.skip_matching, null) } } + + strategy = instance_refresh.value.strategy + triggers = try(instance_refresh.value.triggers, null) } } + dynamic "launch_template" { + for_each = var.use_mixed_instances_policy ? 
[] : [1] + + content { + id = local.launch_template_id + version = local.launch_template_version + } + } + + max_instance_lifetime = var.max_instance_lifetime + max_size = var.max_size + metrics_granularity = var.metrics_granularity + min_elb_capacity = var.min_elb_capacity + min_size = var.min_size + dynamic "mixed_instances_policy" { for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : [] + content { dynamic "instances_distribution" { for_each = try([mixed_instances_policy.value.instances_distribution], []) + content { - on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null) - on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null) - on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null) - spot_allocation_strategy = lookup(instances_distribution.value, "spot_allocation_strategy", null) - spot_instance_pools = lookup(instances_distribution.value, "spot_instance_pools", null) - spot_max_price = lookup(instances_distribution.value, "spot_max_price", null) + on_demand_allocation_strategy = try(instances_distribution.value.on_demand_allocation_strategy, null) + on_demand_base_capacity = try(instances_distribution.value.on_demand_base_capacity, null) + on_demand_percentage_above_base_capacity = try(instances_distribution.value.on_demand_percentage_above_base_capacity, null) + spot_allocation_strategy = try(instances_distribution.value.spot_allocation_strategy, null) + spot_instance_pools = try(instances_distribution.value.spot_instance_pools, null) + spot_max_price = try(instances_distribution.value.spot_max_price, null) } } launch_template { launch_template_specification { - launch_template_name = local.launch_template_name - version = local.launch_template_version + launch_template_id = local.launch_template_id + version = local.launch_template_version } dynamic "override" { 
for_each = try(mixed_instances_policy.value.override, []) + content { - instance_type = lookup(override.value, "instance_type", null) - weighted_capacity = lookup(override.value, "weighted_capacity", null) + dynamic "instance_requirements" { + for_each = try([override.value.instance_requirements], []) + + content { + + dynamic "accelerator_count" { + for_each = try([instance_requirements.value.accelerator_count], []) + + content { + max = try(accelerator_count.value.max, null) + min = try(accelerator_count.value.min, null) + } + } + + accelerator_manufacturers = try(instance_requirements.value.accelerator_manufacturers, []) + accelerator_names = try(instance_requirements.value.accelerator_names, []) + + dynamic "accelerator_total_memory_mib" { + for_each = try([instance_requirements.value.accelerator_total_memory_mib], []) + + content { + max = try(accelerator_total_memory_mib.value.max, null) + min = try(accelerator_total_memory_mib.value.min, null) + } + } + + accelerator_types = try(instance_requirements.value.accelerator_types, []) + bare_metal = try(instance_requirements.value.bare_metal, null) + + dynamic "baseline_ebs_bandwidth_mbps" { + for_each = try([instance_requirements.value.baseline_ebs_bandwidth_mbps], []) + + content { + max = try(baseline_ebs_bandwidth_mbps.value.max, null) + min = try(baseline_ebs_bandwidth_mbps.value.min, null) + } + } + + burstable_performance = try(instance_requirements.value.burstable_performance, null) + cpu_manufacturers = try(instance_requirements.value.cpu_manufacturers, []) + excluded_instance_types = try(instance_requirements.value.excluded_instance_types, []) + instance_generations = try(instance_requirements.value.instance_generations, []) + local_storage = try(instance_requirements.value.local_storage, null) + local_storage_types = try(instance_requirements.value.local_storage_types, []) + + dynamic "memory_gib_per_vcpu" { + for_each = try([instance_requirements.value.memory_gib_per_vcpu], []) + + content { + max = 
try(memory_gib_per_vcpu.value.max, null) + min = try(memory_gib_per_vcpu.value.min, null) + } + } + + dynamic "memory_mib" { + for_each = [instance_requirements.value.memory_mib] + + content { + max = try(memory_mib.value.max, null) + min = memory_mib.value.min + } + } + + dynamic "network_interface_count" { + for_each = try([instance_requirements.value.network_interface_count], []) + + content { + max = try(network_interface_count.value.max, null) + min = try(network_interface_count.value.min, null) + } + } + + on_demand_max_price_percentage_over_lowest_price = try(instance_requirements.value.on_demand_max_price_percentage_over_lowest_price, null) + require_hibernate_support = try(instance_requirements.value.require_hibernate_support, null) + spot_max_price_percentage_over_lowest_price = try(instance_requirements.value.spot_max_price_percentage_over_lowest_price, null) + + dynamic "total_local_storage_gb" { + for_each = try([instance_requirements.value.total_local_storage_gb], []) + + content { + max = try(total_local_storage_gb.value.max, null) + min = try(total_local_storage_gb.value.min, null) + } + } + + dynamic "vcpu_count" { + for_each = [instance_requirements.value.vcpu_count] + + content { + max = try(vcpu_count.value.max, null) + min = vcpu_count.value.min + } + } + } + } + + instance_type = try(override.value.instance_type, null) dynamic "launch_template_specification" { - for_each = length(lookup(override.value, "launch_template_specification", {})) > 0 ? 
override.value.launch_template_specification : [] + for_each = try([override.value.launch_template_specification], []) + content { - launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null) + launch_template_id = try(launch_template_specification.value.launch_template_id, null) + version = try(launch_template_specification.value.version, null) } } + + weighted_capacity = try(override.value.weighted_capacity, null) } } } } } - dynamic "warm_pool" { - for_each = length(var.warm_pool) > 0 ? [var.warm_pool] : [] - content { - pool_state = lookup(warm_pool.value, "pool_state", null) - min_size = lookup(warm_pool.value, "min_size", null) - max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null) - } - } + name = var.use_name_prefix ? null : var.name + name_prefix = var.use_name_prefix ? "${var.name}-" : null + placement_group = var.placement_group + protect_from_scale_in = var.protect_from_scale_in + service_linked_role_arn = var.service_linked_role_arn + suspended_processes = var.suspended_processes dynamic "tag" { for_each = merge( @@ -405,6 +639,30 @@ resource "aws_autoscaling_group" "this" { } } + target_group_arns = var.target_group_arns + termination_policies = var.termination_policies + vpc_zone_identifier = var.subnet_ids + wait_for_capacity_timeout = var.wait_for_capacity_timeout + wait_for_elb_capacity = var.wait_for_elb_capacity + + dynamic "warm_pool" { + for_each = length(var.warm_pool) > 0 ? 
[var.warm_pool] : [] + + content { + dynamic "instance_reuse_policy" { + for_each = try([warm_pool.value.instance_reuse_policy], []) + + content { + reuse_on_scale_in = try(instance_reuse_policy.value.reuse_on_scale_in, null) + } + } + + max_group_prepared_capacity = try(warm_pool.value.max_group_prepared_capacity, null) + min_size = try(warm_pool.value.min_size, null) + pool_state = try(warm_pool.value.pool_state, null) + } + } + timeouts { delete = var.delete_timeout } @@ -427,16 +685,16 @@ resource "aws_autoscaling_schedule" "this" { scheduled_action_name = each.key autoscaling_group_name = aws_autoscaling_group.this[0].name - min_size = lookup(each.value, "min_size", null) - max_size = lookup(each.value, "max_size", null) - desired_capacity = lookup(each.value, "desired_size", null) - start_time = lookup(each.value, "start_time", null) - end_time = lookup(each.value, "end_time", null) - time_zone = lookup(each.value, "time_zone", null) + min_size = try(each.value.min_size, null) + max_size = try(each.value.max_size, null) + desired_capacity = try(each.value.desired_size, null) + start_time = try(each.value.start_time, null) + end_time = try(each.value.end_time, null) + time_zone = try(each.value.time_zone, null) # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] # Cron examples: https://crontab.guru/examples.html - recurrence = lookup(each.value, "recurrence", null) + recurrence = try(each.value.recurrence, null) } ################################################################################ diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index 71cbdb7ab0..b3f7f6ba39 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -72,8 +72,14 @@ variable "create_launch_template" { default = true } +variable "launch_template_id" { + description = "The ID of an existing launch template to use. 
Required when `create_launch_template` = `false`" + type = string + default = "" +} + variable "launch_template_name" { - description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)" + description = "Name of launch template to be created" type = string default = null } @@ -152,7 +158,7 @@ variable "credit_specification" { variable "elastic_gpu_specifications" { description = "The elastic GPU to attach to the instance" - type = map(string) + type = any default = {} } @@ -180,9 +186,15 @@ variable "instance_market_options" { default = {} } +variable "maintenance_options" { + description = "The maintenance options for the instance" + type = any + default = {} +} + variable "license_specifications" { - description = "A list of license specifications to associate with" - type = map(string) + description = "A map of license specifications to associate with" + type = any default = {} } @@ -198,6 +210,12 @@ variable "placement" { default = {} } +variable "private_dns_name_options" { + description = "The options for the instance hostname. The default values are inherited from the subnet" + type = map(string) + default = {} +} + variable "ebs_optimized" { description = "If true, the launched EC2 instance will be EBS-optimized" type = bool @@ -216,6 +234,12 @@ variable "cluster_version" { default = null } +variable "instance_requirements" { + description = "The attribute requirements for the type of instance. 
If present then `instance_type` cannot be present" + type = any + default = {} +} + variable "instance_type" { description = "The type of the instance to launch" type = string @@ -320,6 +344,12 @@ variable "desired_size" { default = 1 } +variable "context" { + description = "Reserved" + type = string + default = null +} + variable "capacity_rebalance" { description = "Indicates whether capacity rebalance is enabled" type = bool @@ -350,6 +380,12 @@ variable "default_cooldown" { default = null } +variable "default_instance_warmup" { + description = "Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data" + type = number + default = null +} + variable "protect_from_scale_in" { description = "Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events." type = bool @@ -386,6 +422,12 @@ variable "force_delete" { default = null } +variable "force_delete_warm_pool" { + description = "Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate" + type = bool + default = null +} + variable "termination_policies" { description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. 
The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`" type = list(string) diff --git a/node_groups.tf b/node_groups.tf index e2782b3b81..084934fb3f 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -357,17 +357,20 @@ module "self_managed_node_group" { wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, var.self_managed_node_group_defaults.wait_for_elb_capacity, null) wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_managed_node_group_defaults.wait_for_capacity_timeout, null) default_cooldown = try(each.value.default_cooldown, var.self_managed_node_group_defaults.default_cooldown, null) + default_instance_warmup = try(each.value.default_instance_warmup, var.self_managed_node_group_defaults.default_instance_warmup, null) protect_from_scale_in = try(each.value.protect_from_scale_in, var.self_managed_node_group_defaults.protect_from_scale_in, null) + context = try(each.value.context, var.self_managed_node_group_defaults.context, null) target_group_arns = try(each.value.target_group_arns, var.self_managed_node_group_defaults.target_group_arns, []) placement_group = try(each.value.placement_group, var.self_managed_node_group_defaults.placement_group, null) health_check_type = try(each.value.health_check_type, var.self_managed_node_group_defaults.health_check_type, null) health_check_grace_period = try(each.value.health_check_grace_period, var.self_managed_node_group_defaults.health_check_grace_period, null) - force_delete = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null) - termination_policies = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, []) - suspended_processes = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, []) - max_instance_lifetime = 
try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null) + force_delete = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null) + force_delete_warm_pool = try(each.value.force_delete_warm_pool, var.self_managed_node_group_defaults.force_delete_warm_pool, null) + termination_policies = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, []) + suspended_processes = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, []) + max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null) enabled_metrics = try(each.value.enabled_metrics, var.self_managed_node_group_defaults.enabled_metrics, []) metrics_granularity = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null) @@ -395,12 +398,15 @@ module "self_managed_node_group" { user_data_template_path = try(each.value.user_data_template_path, var.self_managed_node_group_defaults.user_data_template_path, "") # Launch Template - create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true) - launch_template_name = try(each.value.launch_template_name, var.self_managed_node_group_defaults.launch_template_name, each.key) - launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.self_managed_node_group_defaults.launch_template_use_name_prefix, true) - launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null) - launch_template_description = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group") - launch_template_tags = 
try(each.value.launch_template_tags, var.self_managed_node_group_defaults.launch_template_tags, {}) + create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true) + launch_template_id = try(each.value.launch_template_id, var.self_managed_node_group_defaults.launch_template_id, "") + launch_template_name = try(each.value.launch_template_name, var.self_managed_node_group_defaults.launch_template_name, each.key) + launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.self_managed_node_group_defaults.launch_template_use_name_prefix, true) + launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null) + launch_template_default_version = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null) + update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true) + launch_template_description = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group") + launch_template_tags = try(each.value.launch_template_tags, var.self_managed_node_group_defaults.launch_template_tags, {}) ebs_optimized = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null) ami_id = try(each.value.ami_id, var.self_managed_node_group_defaults.ami_id, "") @@ -408,12 +414,10 @@ module "self_managed_node_group" { instance_type = try(each.value.instance_type, var.self_managed_node_group_defaults.instance_type, "m6i.large") key_name = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null) - launch_template_default_version = 
try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null) - update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true) - disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null) - instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null) - kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null) - ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null) + disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null) + instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null) + kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null) block_device_mappings = try(each.value.block_device_mappings, var.self_managed_node_group_defaults.block_device_mappings, {}) capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_managed_node_group_defaults.capacity_reservation_specification, {}) @@ -423,12 +427,15 @@ module "self_managed_node_group" { elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.self_managed_node_group_defaults.elastic_inference_accelerator, {}) enclave_options = try(each.value.enclave_options, var.self_managed_node_group_defaults.enclave_options, {}) hibernation_options = try(each.value.hibernation_options, 
var.self_managed_node_group_defaults.hibernation_options, {}) + instance_requirements = try(each.value.instance_requirements, var.self_managed_node_group_defaults.instance_requirements, {}) instance_market_options = try(each.value.instance_market_options, var.self_managed_node_group_defaults.instance_market_options, {}) license_specifications = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, {}) metadata_options = try(each.value.metadata_options, var.self_managed_node_group_defaults.metadata_options, local.metadata_options) enable_monitoring = try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, true) network_interfaces = try(each.value.network_interfaces, var.self_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.self_managed_node_group_defaults.placement, {}) + maintenance_options = try(each.value.maintenance_options, var.self_managed_node_group_defaults.maintenance_options, {}) + private_dns_name_options = try(each.value.private_dns_name_options, var.self_managed_node_group_defaults.private_dns_name_options, {}) # IAM role create_iam_instance_profile = try(each.value.create_iam_instance_profile, var.self_managed_node_group_defaults.create_iam_instance_profile, true) From 4754012ceef1758f6a7b01dddf262b15c97f8f84 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 26 Oct 2022 15:45:14 -0400 Subject: [PATCH 24/33] feat: Update `eks-managed-node-group` to use latest settings provided by AWS provider --- docs/UPGRADE-19.0.md | 22 +- examples/eks_managed_node_group/main.tf | 2 +- modules/eks-managed-node-group/README.md | 11 +- modules/eks-managed-node-group/main.tf | 216 ++++++++++++------- modules/eks-managed-node-group/variables.tf | 30 ++- modules/self-managed-node-group/README.md | 2 +- modules/self-managed-node-group/main.tf | 2 +- modules/self-managed-node-group/variables.tf | 7 +- node_groups.tf | 46 ++-- 9 files changed, 223 
insertions(+), 115 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 20751a3ee3..7a5d5dcc52 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -32,6 +32,16 @@ Please consult the `examples` directory for reference example configurations. If - `cluster_endpoint_private_access` previously defaulted to `false` and now defaults to `true` - The addon configuration now sets `"OVERWRITE"` as the default value for `resolve_conflicts` to ease addon upgrade management. Users can opt out of this by instead setting `"NONE"` as the value for `resolve_conflicts` - The `kms` module used has been updated from `v1.0.2` to `v1.1.0` - no material changes other than updated to latest +- The default value for EKS managed node group `update_config` has been updated to the recommended `{ max_unavailable_percentage = 33 }` +- The default value for the self-managed node group `instance_refresh` has been updated to the recommended: + ```hcl + { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } + ``` ### Removed @@ -83,7 +93,10 @@ Please consult the `examples` directory for reference example configurations. If - `force_delete_warm_pool` - EKS managed node groups: - `use_custom_launch_template` was added to better clarify how users can switch betweeen a custom launch template or the default launch template provided by the EKS managed node group. Previously, to achieve this same functionality of using the default launch template, users needed to set `create_launch_template = false` and `launch_template_name = ""` which is not very intuitive. - + - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204) + - `maintenance_options` + - `private_dns_name_options` + - 4. Removed outputs: - Self managed node groups: @@ -140,9 +153,10 @@ EKS managed node groups on `v18.x` by default create a security group that does 3. 
3. New instances will launch without the EKS managed node group security group, and prior instances will be terminated
4. Once the EKS managed node group has cycled, the security group will be deleted

-2. Once the node group security group(s) have been removed, you can update your module definition to specify the `v19.x` version of the module.
-3. Using the documentation provided above, update your module definition to reflect the changes in the module from `v18.x` to `v19.x`. You can utilize `terraform plan` as you go to help highlight any changes that you wish to make. See below for `terraform state mv ...` commands related to the use of `iam_role_additional_policies`. If you are not providing any values to these variables, you can skip this section.
-4. Once you are satisifed with the changes and the `terraform plan` output, you can apply the changes to sync your infrastructure with the updated module definition (or vice versa).
+2. Once the node group security group(s) have been removed, you can update your module definition to specify the `v19.x` version of the module
+3. Run `terraform init -upgrade=true` to update your configuration and pull in the v19 changes
+4. Using the documentation provided above, update your module definition to reflect the changes in the module from `v18.x` to `v19.x`. You can utilize `terraform plan` as you go to help highlight any changes that you wish to make. See below for `terraform state mv ...` commands related to the use of `iam_role_additional_policies`. If you are not providing any values to these variables, you can skip this section.
+5. Once you are satisfied with the changes and the `terraform plan` output, you can apply the changes to sync your infrastructure with the updated module definition (or vice versa).
### Diff of Before (v18.x) vs After (v19.x) diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index e492c5eedc..23b681ac6d 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -212,7 +212,7 @@ module "eks" { # Use existing/external launch template external_lt = { create_launch_template = false - launch_template_name = aws_launch_template.external.name + launch_template_id = aws_launch_template.external.id launch_template_version = aws_launch_template.external.default_version } diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 847c5da00b..699d016f11 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -117,7 +117,7 @@ module "eks_managed_node_group" { | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no | | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no | -| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20` | `number` | `null` | no | +| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20`. 
Only valid when `use_custom_launch_template` = `false` | `number` | `null` | no | | [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no | | [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | @@ -141,11 +141,13 @@ module "eks_managed_node_group" { | [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no | | [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default version of the launch template | `string` | `null` | no | | [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no | -| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `""` | no | +| [launch\_template\_id](#input\_launch\_template\_id) | The ID of an existing launch template to use. 
Required when `create_launch_template` = `false` and `use_custom_launch_template` = `true` | `string` | `""` | no | +| [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `null` | no | | [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no | | [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no | | [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no | | [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `{}` | no | +| [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | `any` | `{}` | no | | [max\_size](#input\_max\_size) | Maximum number of instances/nodes | `number` | `3` | no | | [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |
{
"http_endpoint": "enabled",
"http_put_response_hop_limit": 2,
"http_tokens": "required"
}
| no | | [min\_size](#input\_min\_size) | Minimum number of instances/nodes | `number` | `0` | no | @@ -155,13 +157,14 @@ module "eks_managed_node_group" { | [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket` or `linux` based; `windows` is not supported | `string` | `"linux"` | no | | [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | | [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | +| [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | `map(string)` | `{}` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | -| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `any` | `{}` | no | +| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false` | `any` | `{}` | no | | [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | | [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. 
Maximum of 50 taints per node group | `any` | `{}` | no | | [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no | -| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `{}` | no | +| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` |
{
"max_unavailable_percentage": 33
}
| no | | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no | | [use\_custom\_launch\_template](#input\_use\_custom\_launch\_template) | Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index 4f51107de0..06275f00a2 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -29,67 +29,51 @@ module "user_data" { ################################################################################ locals { - launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") - security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + launch_template_name = coalesce(var.launch_template_name, "${var.name}-eks-node-group") + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { count = var.create && var.create_launch_template && var.use_custom_launch_template ? 1 : 0 - name = var.launch_template_use_name_prefix ? null : local.launch_template_name_int - name_prefix = var.launch_template_use_name_prefix ? 
"${local.launch_template_name_int}-" : null - description = var.launch_template_description - - ebs_optimized = var.ebs_optimized - image_id = var.ami_id - # # Set on node group instead - # instance_type = var.launch_template_instance_type - key_name = var.key_name - user_data = module.user_data.user_data - - vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids - - default_version = var.launch_template_default_version - update_default_version = var.update_launch_template_default_version - disable_api_termination = var.disable_api_termination - # Set on EKS managed node group, will fail if set here - # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics - # instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior - kernel_id = var.kernel_id - ram_disk_id = var.ram_disk_id - dynamic "block_device_mappings" { for_each = var.block_device_mappings + content { - device_name = block_device_mappings.value.device_name - no_device = lookup(block_device_mappings.value, "no_device", null) - virtual_name = lookup(block_device_mappings.value, "virtual_name", null) + device_name = try(block_device_mappings.value.device_name, null) dynamic "ebs" { - for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) + for_each = try([block_device_mappings.value.ebs], []) + content { - delete_on_termination = lookup(ebs.value, "delete_on_termination", null) - encrypted = lookup(ebs.value, "encrypted", null) - kms_key_id = lookup(ebs.value, "kms_key_id", null) - iops = lookup(ebs.value, "iops", null) - throughput = lookup(ebs.value, "throughput", null) - snapshot_id = lookup(ebs.value, "snapshot_id", null) - volume_size = lookup(ebs.value, "volume_size", null) - volume_type = lookup(ebs.value, "volume_type", null) + delete_on_termination = try(ebs.value.delete_on_termination, null) + encrypted = try(ebs.value.encrypted, null) + iops = try(ebs.value.iops, null) + kms_key_id = 
try(ebs.value.kms_key_id, null) + snapshot_id = try(ebs.value.snapshot_id, null) + throughput = try(ebs.value.throughput, null) + volume_size = try(ebs.value.volume_size, null) + volume_type = try(ebs.value.volume_type, null) } } + + no_device = try(block_device_mappings.value.no_device, null) + virtual_name = try(block_device_mappings.value.virtual_name, null) } } dynamic "capacity_reservation_specification" { for_each = length(var.capacity_reservation_specification) > 0 ? [var.capacity_reservation_specification] : [] + content { - capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) + capacity_reservation_preference = try(capacity_reservation_specification.value.capacity_reservation_preference, null) dynamic "capacity_reservation_target" { for_each = try([capacity_reservation_specification.value.capacity_reservation_target], []) + content { - capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) + capacity_reservation_id = try(capacity_reservation_target.value.capacity_reservation_id, null) + capacity_reservation_resource_group_arn = try(capacity_reservation_target.value.capacity_reservation_resource_group_arn, null) } } } @@ -97,21 +81,29 @@ resource "aws_launch_template" "this" { dynamic "cpu_options" { for_each = length(var.cpu_options) > 0 ? [var.cpu_options] : [] + content { - core_count = cpu_options.value.core_count - threads_per_core = cpu_options.value.threads_per_core + core_count = try(cpu_options.value.core_count, null) + threads_per_core = try(cpu_options.value.threads_per_core, null) } } dynamic "credit_specification" { for_each = length(var.credit_specification) > 0 ? 
[var.credit_specification] : [] + content { - cpu_credits = credit_specification.value.cpu_credits + cpu_credits = try(credit_specification.value.cpu_credits, null) } } + default_version = var.launch_template_default_version + description = var.launch_template_description + disable_api_termination = var.disable_api_termination + ebs_optimized = var.ebs_optimized + dynamic "elastic_gpu_specifications" { - for_each = length(var.elastic_gpu_specifications) > 0 ? [var.elastic_gpu_specifications] : [] + for_each = var.elastic_gpu_specifications + content { type = elastic_gpu_specifications.value.type } @@ -119,6 +111,7 @@ resource "aws_launch_template" "this" { dynamic "elastic_inference_accelerator" { for_each = length(var.elastic_inference_accelerator) > 0 ? [var.elastic_inference_accelerator] : [] + content { type = elastic_inference_accelerator.value.type } @@ -126,6 +119,7 @@ resource "aws_launch_template" "this" { dynamic "enclave_options" { for_each = length(var.enclave_options) > 0 ? [var.enclave_options] : [] + content { enabled = enclave_options.value.enabled } @@ -134,7 +128,8 @@ resource "aws_launch_template" "this" { # Set on EKS managed node group, will fail if set here # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics # dynamic "hibernation_options" { - # for_each = var.hibernation_options != null ? [var.hibernation_options] : [] + # for_each = length(var.hibernation_options) > 0 ? [var.hibernation_options] : [] + # content { # configured = hibernation_options.value.configured # } @@ -150,95 +145,144 @@ resource "aws_launch_template" "this" { # } # } + image_id = var.ami_id + # Set on EKS managed node group, will fail if set here + # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics + # instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior + dynamic "instance_market_options" { for_each = length(var.instance_market_options) > 0 ? 
[var.instance_market_options] : [] + content { - market_type = instance_market_options.value.market_type + market_type = try(instance_market_options.value.market_type, null) dynamic "spot_options" { - for_each = length(lookup(instance_market_options.value, "spot_options", {})) > 0 ? [instance_market_options.value.spot_options] : [] + for_each = try([instance_market_options.value.spot_options], []) + content { - block_duration_minutes = lookup(spot_options.value, "block_duration_minutes", null) - instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null) - max_price = lookup(spot_options.value, "max_price", null) - spot_instance_type = lookup(spot_options.value, "spot_instance_type", null) - valid_until = lookup(spot_options.value, "valid_until", null) + block_duration_minutes = try(spot_options.value.block_duration_minutes, null) + instance_interruption_behavior = try(spot_options.value.instance_interruption_behavior, null) + max_price = try(spot_options.value.max_price, null) + spot_instance_type = try(spot_options.value.spot_instance_type, null) + valid_until = try(spot_options.value.valid_until, null) } } } } + # # Set on node group instead + # instance_type = var.launch_template_instance_type + kernel_id = var.kernel_id + key_name = var.key_name + dynamic "license_specification" { - for_each = length(var.license_specifications) > 0 ? [var.license_specifications] : [] + for_each = length(var.license_specifications) > 0 ? var.license_specifications : {} + content { license_configuration_arn = license_specifications.value.license_configuration_arn } } + dynamic "maintenance_options" { + for_each = length(var.maintenance_options) > 0 ? [var.maintenance_options] : [] + + content { + auto_recovery = try(maintenance_options.value.auto_recovery, null) + } + } + dynamic "metadata_options" { for_each = length(var.metadata_options) > 0 ? 
[var.metadata_options] : [] + content { - http_endpoint = lookup(metadata_options.value, "http_endpoint", null) - http_tokens = lookup(metadata_options.value, "http_tokens", null) - http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) - http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) - instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) + http_endpoint = try(metadata_options.value.http_endpoint, null) + http_protocol_ipv6 = try(metadata_options.value.http_protocol_ipv6, null) + http_put_response_hop_limit = try(metadata_options.value.http_put_response_hop_limit, null) + http_tokens = try(metadata_options.value.http_tokens, null) + instance_metadata_tags = try(metadata_options.value.instance_metadata_tags, null) } } dynamic "monitoring" { - for_each = var.enable_monitoring != null ? [1] : [] + for_each = var.enable_monitoring ? [1] : [] + content { enabled = var.enable_monitoring } } + name = var.launch_template_use_name_prefix ? null : local.launch_template_name + name_prefix = var.launch_template_use_name_prefix ? 
"${local.launch_template_name}-" : null + dynamic "network_interfaces" { for_each = var.network_interfaces content { - associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) - associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) - delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) - description = lookup(network_interfaces.value, "description", null) - device_index = lookup(network_interfaces.value, "device_index", null) - interface_type = lookup(network_interfaces.value, "interface_type", null) + associate_carrier_ip_address = try(network_interfaces.value.associate_carrier_ip_address, null) + associate_public_ip_address = try(network_interfaces.value.associate_public_ip_address, null) + delete_on_termination = try(network_interfaces.value.delete_on_termination, null) + description = try(network_interfaces.value.description, null) + device_index = try(network_interfaces.value.device_index, null) + interface_type = try(network_interfaces.value.interface_type, null) + ipv4_address_count = try(network_interfaces.value.ipv4_address_count, null) ipv4_addresses = try(network_interfaces.value.ipv4_addresses, []) - ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) + ipv4_prefix_count = try(network_interfaces.value.ipv4_prefix_count, null) + ipv4_prefixes = try(network_interfaces.value.ipv4_prefixes, null) + ipv6_address_count = try(network_interfaces.value.ipv6_address_count, null) ipv6_addresses = try(network_interfaces.value.ipv6_addresses, []) - ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) - network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) - private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) - security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) + 
ipv6_prefix_count = try(network_interfaces.value.ipv6_prefix_count, null) + ipv6_prefixes = try(network_interfaces.value.ipv6_prefixes, []) + network_card_index = try(network_interfaces.value.network_card_index, null) + network_interface_id = try(network_interfaces.value.network_interface_id, null) + private_ip_address = try(network_interfaces.value.private_ip_address, null) + # Ref: https://github.com/hashicorp/terraform-provider-aws/issues/4570 + security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) # Set on EKS managed node group, will fail if set here # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics - # subnet_id = lookup(network_interfaces.value, "subnet_id", null) + # subnet_id = try(network_interfaces.value.subnet_id, null) } } dynamic "placement" { for_each = length(var.placement) > 0 ? [var.placement] : [] + + content { + affinity = try(placement.value.affinity, null) + availability_zone = try(placement.value.availability_zone, null) + group_name = try(placement.value.group_name, null) + host_id = try(placement.value.host_id, null) + host_resource_group_arn = try(placement.value.host_resource_group_arn, null) + partition_number = try(placement.value.partition_number, null) + spread_domain = try(placement.value.spread_domain, null) + tenancy = try(placement.value.tenancy, null) + } + } + + dynamic "private_dns_name_options" { + for_each = length(var.private_dns_name_options) > 0 ? 
[var.private_dns_name_options] : [] + content { - affinity = lookup(placement.value, "affinity", null) - availability_zone = lookup(placement.value, "availability_zone", null) - group_name = lookup(placement.value, "group_name", null) - host_id = lookup(placement.value, "host_id", null) - spread_domain = lookup(placement.value, "spread_domain", null) - tenancy = lookup(placement.value, "tenancy", null) - partition_number = lookup(placement.value, "partition_number", null) + enable_resource_name_dns_aaaa_record = try(private_dns_name_options.value.enable_resource_name_dns_aaaa_record, null) + enable_resource_name_dns_a_record = try(private_dns_name_options.value.enable_resource_name_dns_a_record, null) + hostname_type = try(private_dns_name_options.value.hostname_type, null) } } + ram_disk_id = var.ram_disk_id + dynamic "tag_specifications" { for_each = toset(["instance", "volume", "network-interface"]) + content { resource_type = tag_specifications.key tags = merge(var.tags, { Name = var.name }, var.launch_template_tags) } } - lifecycle { - create_before_destroy = true - } + update_default_version = var.update_launch_template_default_version + user_data = module.user_data.user_data + vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids + + tags = var.tags # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes @@ -246,7 +290,9 @@ resource "aws_launch_template" "this" { aws_iam_role_policy_attachment.this, ] - tags = var.tags + lifecycle { + create_before_destroy = true + } } ################################################################################ @@ -254,7 +300,7 @@ resource "aws_launch_template" "this" { ################################################################################ locals { - launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name, null) + launch_template_id = var.create && var.create_launch_template ? 
aws_launch_template.this[0].id : var.launch_template_id # Change order to allow users to set version priority before using defaults launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default")) } @@ -290,14 +336,16 @@ resource "aws_eks_node_group" "this" { dynamic "launch_template" { for_each = var.use_custom_launch_template ? [1] : [] + content { - name = local.launch_template_name + id = local.launch_template_id version = local.launch_template_version } } dynamic "remote_access" { for_each = length(var.remote_access) > 0 ? [var.remote_access] : [] + content { ec2_ssh_key = try(remote_access.value.ec2_ssh_key, null) source_security_group_ids = try(remote_access.value.source_security_group_ids, []) @@ -306,15 +354,17 @@ resource "aws_eks_node_group" "this" { dynamic "taint" { for_each = var.taints + content { key = taint.value.key - value = lookup(taint.value, "value") + value = try(taint.value.value, null) effect = taint.value.effect } } dynamic "update_config" { for_each = length(var.update_config) > 0 ? [var.update_config] : [] + content { max_unavailable_percentage = try(update_config.value.max_unavailable_percentage, null) max_unavailable = try(update_config.value.max_unavailable, null) diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index a29658886e..27b79c9576 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -90,12 +90,18 @@ variable "use_custom_launch_template" { default = true } -variable "launch_template_name" { - description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)" +variable "launch_template_id" { + description = "The ID of an existing launch template to use. 
Required when `create_launch_template` = `false` and `use_custom_launch_template` = `true`" type = string default = "" } +variable "launch_template_name" { + description = "Name of launch template to be created" + type = string + default = null +} + variable "launch_template_use_name_prefix" { description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix" type = bool @@ -216,6 +222,12 @@ variable "instance_market_options" { default = {} } +variable "maintenance_options" { + description = "The maintenance options for the instance" + type = any + default = {} +} + variable "license_specifications" { description = "A list of license specifications to associate with" type = map(string) @@ -250,6 +262,12 @@ variable "placement" { default = {} } +variable "private_dns_name_options" { + description = "The options for the instance hostname. The default values are inherited from the subnet" + type = map(string) + default = {} +} + variable "launch_template_tags" { description = "A map of additional tags to add to the tag_specifications of launch template created" type = map(string) @@ -315,7 +333,7 @@ variable "capacity_type" { } variable "disk_size" { - description = "Disk size in GiB for nodes. Defaults to `20`" + description = "Disk size in GiB for nodes. Defaults to `20`. Only valid when `use_custom_launch_template` = `false`" type = number default = null } @@ -351,7 +369,7 @@ variable "launch_template_version" { } variable "remote_access" { - description = "Configuration block with remote access settings" + description = "Configuration block with remote access settings. 
Only valid when `use_custom_launch_template` = `false`" type = any default = {} } @@ -365,7 +383,9 @@ variable "taints" { variable "update_config" { description = "Configuration block of settings for max unavailable resources during node group updates" type = map(string) - default = {} + default = { + max_unavailable_percentage = 33 + } } variable "timeouts" { diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 411342e498..1fa3cd47b9 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -126,7 +126,7 @@ module "self_managed_node_group" { | [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no | | [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no | | [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `{}` | no | -| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | `{}` | no | +| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` |
{
"preferences": {
"min_healthy_percentage": 66
},
"strategy": "Rolling"
}
| no | | [instance\_requirements](#input\_instance\_requirements) | The attribute requirements for the type of instance. If present then `instance_type` cannot be present | `any` | `{}` | no | | [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no | | [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index f938743903..7632d24609 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -394,7 +394,7 @@ resource "aws_launch_template" "this" { ################################################################################ locals { - launch_template_id = var.create_launch_template ? aws_launch_template.this[0].id : var.launch_template_id + launch_template_id = var.create && var.create_launch_template ? aws_launch_template.this[0].id : var.launch_template_id # Change order to allow users to set version priority before using defaults launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default")) } diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index b3f7f6ba39..c2850b659e 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -473,7 +473,12 @@ variable "initial_lifecycle_hooks" { variable "instance_refresh" { description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated" type = any - default = {} + default = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } } variable "use_mixed_instances_policy" { diff --git a/node_groups.tf b/node_groups.tf index 084934fb3f..168152a37e 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -4,6 +4,19 @@ locals { http_tokens = "required" http_put_response_hop_limit = 2 } + + # 
EKS managed node group + default_update_config = { + max_unavailable_percentage = 33 + } + + # Self-managed node group + default_instance_refresh = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } } ################################################################################ @@ -260,7 +273,7 @@ module "eks_managed_node_group" { remote_access = try(each.value.remote_access, var.eks_managed_node_group_defaults.remote_access, {}) taints = try(each.value.taints, var.eks_managed_node_group_defaults.taints, {}) - update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, {}) + update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, local.default_update_config) timeouts = try(each.value.timeouts, var.eks_managed_node_group_defaults.timeouts, {}) # User data @@ -275,21 +288,22 @@ module "eks_managed_node_group" { user_data_template_path = try(each.value.user_data_template_path, var.eks_managed_node_group_defaults.user_data_template_path, "") # Launch Template - create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) - use_custom_launch_template = try(each.value.use_custom_launch_template, var.eks_managed_node_group_defaults.use_custom_launch_template, true) - launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key) - launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true) - launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null) - launch_template_description = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed 
node group") - launch_template_tags = try(each.value.launch_template_tags, var.eks_managed_node_group_defaults.launch_template_tags, {}) - - ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null) - key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null) + create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) + use_custom_launch_template = try(each.value.use_custom_launch_template, var.eks_managed_node_group_defaults.use_custom_launch_template, true) + launch_template_id = try(each.value.launch_template_id, var.eks_managed_node_group_defaults.launch_template_id, "") + launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key) + launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true) + launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null) launch_template_default_version = try(each.value.launch_template_default_version, var.eks_managed_node_group_defaults.launch_template_default_version, null) update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.eks_managed_node_group_defaults.update_launch_template_default_version, true) - disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null) - kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null) - ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null) + launch_template_description = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for 
${try(each.value.name, each.key)} EKS managed node group") + launch_template_tags = try(each.value.launch_template_tags, var.eks_managed_node_group_defaults.launch_template_tags, {}) + + ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null) + key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null) + disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null) + kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null) block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, {}) capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.eks_managed_node_group_defaults.capacity_reservation_specification, {}) @@ -304,6 +318,8 @@ module "eks_managed_node_group" { enable_monitoring = try(each.value.enable_monitoring, var.eks_managed_node_group_defaults.enable_monitoring, true) network_interfaces = try(each.value.network_interfaces, var.eks_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.eks_managed_node_group_defaults.placement, {}) + maintenance_options = try(each.value.maintenance_options, var.eks_managed_node_group_defaults.maintenance_options, {}) + private_dns_name_options = try(each.value.private_dns_name_options, var.eks_managed_node_group_defaults.private_dns_name_options, {}) # IAM role create_iam_role = try(each.value.create_iam_role, var.eks_managed_node_group_defaults.create_iam_role, true) @@ -377,7 +393,7 @@ module "self_managed_node_group" { service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_managed_node_group_defaults.service_linked_role_arn, null) initial_lifecycle_hooks = 
try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, []) - instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, {}) + instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh) use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false) mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null) warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {}) From 2596ea356b9c1e244b98005170e053990ba5c2dd Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 26 Oct 2022 16:34:12 -0400 Subject: [PATCH 25/33] feat: Add example for EKS on Outposts with local cluster --- README.md | 6 +- examples/complete/README.md | 4 +- examples/complete/versions.tf | 2 +- examples/eks_managed_node_group/README.md | 4 +- examples/eks_managed_node_group/versions.tf | 2 +- examples/fargate_profile/README.md | 4 +- examples/fargate_profile/versions.tf | 2 +- examples/outposts/README.md | 95 +++++++++ examples/outposts/main.tf | 115 +++++++++++ examples/outposts/outputs.tf | 206 +++++++++++++++++++ examples/outposts/variables.tf | 10 + examples/outposts/versions.tf | 14 ++ examples/self_managed_node_group/README.md | 4 +- examples/self_managed_node_group/versions.tf | 2 +- main.tf | 12 +- modules/eks-managed-node-group/README.md | 4 +- modules/eks-managed-node-group/variables.tf | 6 +- variables.tf | 10 +- 18 files changed, 469 insertions(+), 33 deletions(-) create mode 100644 examples/outposts/README.md create mode 100644 examples/outposts/main.tf create mode 100644 examples/outposts/outputs.tf create mode 100644 examples/outposts/variables.tf create mode 100644 examples/outposts/versions.tf diff --git a/README.md b/README.md 
index debbc40453..c3960b48e3 100644 --- a/README.md +++ b/README.md @@ -203,6 +203,7 @@ module "eks" { - [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations - [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups - [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) +- [Outposts](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/outposts): EKS local cluster provisioned on [AWS Outposts](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html) - [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data @@ -301,7 +302,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no | | [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. 
Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no | | [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false` | `string` | `""` | no | +| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster | `string` | `""` | no | | [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no | | [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no | | [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `bool` | `true` | no | @@ -315,7 +316,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | | [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. 
If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no | | [create\_cluster\_primary\_security\_group\_tags](#input\_create\_cluster\_primary\_security\_group\_tags) | Indicates whether or not to tag the cluster's primary security group. This security group is created by the EKS service, not the module, and therefore tagging is handled after cluster creation | `bool` | `true` | no | -| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no | +| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default | `bool` | `true` | no | | [create\_cni\_ipv6\_iam\_policy](#input\_create\_cni\_ipv6\_iam\_policy) | Determines whether to create an [`AmazonEKS_CNI_IPv6_Policy`](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy) | `bool` | `false` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a an IAM role is created or to use an existing IAM role | `bool` | `true` | no | | [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `true` | no | @@ -357,7 +358,6 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | | [outpost\_config](#input\_outpost\_config) | Configuration for the AWS Outpost to provision the cluster on | `any` | `{}` | no | | [prefix\_separator](#input\_prefix\_separator) | The separator to use between the prefix and the generated timestamp for resource names | `string` | `"-"` | no | -| [provision\_on\_outpost](#input\_provision\_on\_outpost) | Determines whether cluster should be provisioned on an AWS Outpost | `bool` | `false` | no | | [putin\_khuylo](#input\_putin\_khuylo) | Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo! | `bool` | `true` | no | | [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no | | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no | diff --git a/examples/complete/README.md b/examples/complete/README.md index 03d45665ab..515961219a 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -34,14 +34,14 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | ## Modules diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index bd119cf38c..49f7eb81c2 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index afa4888d3f..1d7ac1b200 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -58,14 +58,14 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | ## Modules diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index bd119cf38c..49f7eb81c2 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 66d72da4e8..5f250fdee1 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -20,7 +20,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | | [helm](#requirement\_helm) | >= 2.7 | | [null](#requirement\_null) | >= 3.0 | @@ -28,7 +28,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | | [helm](#provider\_helm) | >= 2.7 | | [null](#provider\_null) | >= 3.0 | diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index 0c1b9659f6..bc41fee29b 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } helm = { source = "hashicorp/helm" diff --git a/examples/outposts/README.md b/examples/outposts/README.md new file mode 100644 index 0000000000..7e6ebd4928 --- /dev/null +++ b/examples/outposts/README.md @@ -0,0 +1,95 @@ +# EKS on Outposts + +Configuration in this directory creates an AWS EKS local cluster on AWS Outposts + +See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html) for further details. + +Note: This example requires an an AWS Outpost to provision. + +## Usage + +To run this example you need to: + +1. Copy the `terraform.tfvars.example` to `terraform.tfvars` and fill in the required variables +2. Execute: + +```bash +$ terraform init +$ terraform plan +$ terraform apply +``` + +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.34 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.34 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | ../.. 
| n/a | +| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_outposts_outposts.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outposts) | data source | +| [aws_subnets.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [outpost\_instance\_type](#input\_outpost\_instance\_type) | Instance type supported by the Outposts instance | `string` | `"m5.large"` | no | +| [region](#input\_region) | The AWS region to deploy into (e.g. us-east-1) | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | +| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | +| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | +| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | +| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | +| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | +| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | +| 
[cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | +| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | +| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | +| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | +| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | +| [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | +| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | +| [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | +| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | +| [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key | +| [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key | +| [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key | +| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group | +| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group | +| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | +| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` | +| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created | +| [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups | + diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf new file mode 100644 index 0000000000..bee2f31aa1 --- /dev/null +++ b/examples/outposts/main.tf @@ -0,0 +1,115 @@ +provider "aws" { + region = var.region +} + 
+provider "kubernetes" { + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] + } +} + +locals { + name = "ex-${basename(path.cwd)}" + cluster_version = "1.21" # Required by EKS on Outposts + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." + + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true + + outpost_config = { + control_plane_instance_type = var.outpost_instance_type + outpost_arns = [tolist(data.aws_outposts_outposts.this.arns)[0]] + } + + cluster_addons = { + coredns = { + most_recent = true + } + kube-proxy = { + most_recent = true + } + vpc-cni = { + most_recent = true + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + } + + # Encryption key + create_kms_key = true + cluster_encryption_config = { + resources = ["secrets"] + } + kms_key_deletion_window_in_days = 7 + enable_kms_key_rotation = true + + create_cluster_security_group = false + create_node_security_group = false + subnet_ids = [tolist(data.aws_subnets.this.ids)[0]] + + manage_aws_auth_configmap = true + + eks_managed_node_group_defaults = { + instance_types = [var.outpost_instance_type] + + attach_cluster_primary_security_group = true + } + + eks_managed_node_groups = { + outpost = { + name = local.name + } + } + + tags = local.tags +} + +################################################################################ +# Supporting Resources 
+################################################################################ + +data "aws_outposts_outposts" "this" {} + +data "aws_subnets" "this" { + filter { + name = "outpost-arn" + values = [tolist(data.aws_outposts_outposts.this.arns)[0]] + } +} + +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.0" + + role_name_prefix = "VPC-CNI-IRSA" + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags +} diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf new file mode 100644 index 0000000000..6e31908292 --- /dev/null +++ b/examples/outposts/outputs.tf @@ -0,0 +1,206 @@ +################################################################################ +# Cluster +################################################################################ + +output "cluster_arn" { + description = "The Amazon Resource Name (ARN) of the cluster" + value = module.eks.cluster_arn +} + +output "cluster_certificate_authority_data" { + description = "Base64 encoded certificate data required to communicate with the cluster" + value = module.eks.cluster_certificate_authority_data +} + +output "cluster_endpoint" { + description = "Endpoint for your Kubernetes API server" + value = module.eks.cluster_endpoint +} + +output "cluster_id" { + description = "The name/id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready" + value = module.eks.cluster_id +} + +output "cluster_oidc_issuer_url" { + description = "The URL on the EKS cluster for the OpenID Connect identity provider" + value = module.eks.cluster_oidc_issuer_url +} + +output "cluster_platform_version" { + description = "Platform version for the cluster" + value = module.eks.cluster_platform_version +} + +output "cluster_status" { + description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" + value = module.eks.cluster_status +} + +output "cluster_primary_security_group_id" { + description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" + value = module.eks.cluster_primary_security_group_id +} + +################################################################################ +# KMS Key +################################################################################ + +output "kms_key_arn" { + description = "The Amazon Resource Name (ARN) of the key" + value = module.eks.kms_key_arn +} + +output "kms_key_id" { + description = "The globally unique identifier for the key" + value = module.eks.kms_key_id +} + +output "kms_key_policy" { + description = "The IAM resource policy set on the key" + value = module.eks.kms_key_policy +} + +################################################################################ +# Security Group +################################################################################ + +output "cluster_security_group_arn" { + description = "Amazon Resource Name (ARN) of the cluster security group" + value = module.eks.cluster_security_group_arn +} + +output "cluster_security_group_id" { + description = "ID of the cluster security group" + value = module.eks.cluster_security_group_id +} + 
+################################################################################ +# Node Security Group +################################################################################ + +output "node_security_group_arn" { + description = "Amazon Resource Name (ARN) of the node shared security group" + value = module.eks.node_security_group_arn +} + +output "node_security_group_id" { + description = "ID of the node shared security group" + value = module.eks.node_security_group_id +} + +################################################################################ +# IRSA +################################################################################ + +output "oidc_provider" { + description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" + value = module.eks.oidc_provider +} + +output "oidc_provider_arn" { + description = "The ARN of the OIDC Provider if `enable_irsa = true`" + value = module.eks.oidc_provider_arn +} + +output "cluster_tls_certificate_sha1_fingerprint" { + description = "The SHA1 fingerprint of the public key of the cluster's certificate" + value = module.eks.cluster_tls_certificate_sha1_fingerprint +} + +################################################################################ +# IAM Role +################################################################################ + +output "cluster_iam_role_name" { + description = "IAM role name of the EKS cluster" + value = module.eks.cluster_iam_role_name +} + +output "cluster_iam_role_arn" { + description = "IAM role ARN of the EKS cluster" + value = module.eks.cluster_iam_role_arn +} + +output "cluster_iam_role_unique_id" { + description = "Stable and unique string identifying the IAM role" + value = module.eks.cluster_iam_role_unique_id +} + +################################################################################ +# EKS Addons +################################################################################ + +output "cluster_addons" { + 
description = "Map of attribute maps for all EKS cluster addons enabled" + value = module.eks.cluster_addons +} + +################################################################################ +# EKS Identity Provider +################################################################################ + +output "cluster_identity_providers" { + description = "Map of attribute maps for all EKS identity providers enabled" + value = module.eks.cluster_identity_providers +} + +################################################################################ +# CloudWatch Log Group +################################################################################ + +output "cloudwatch_log_group_name" { + description = "Name of cloudwatch log group created" + value = module.eks.cloudwatch_log_group_name +} + +output "cloudwatch_log_group_arn" { + description = "Arn of cloudwatch log group created" + value = module.eks.cloudwatch_log_group_arn +} + +################################################################################ +# Fargate Profile +################################################################################ + +output "fargate_profiles" { + description = "Map of attribute maps for all EKS Fargate Profiles created" + value = module.eks.fargate_profiles +} + +################################################################################ +# EKS Managed Node Group +################################################################################ + +output "eks_managed_node_groups" { + description = "Map of attribute maps for all EKS managed node groups created" + value = module.eks.eks_managed_node_groups +} + +output "eks_managed_node_groups_autoscaling_group_names" { + description = "List of the autoscaling group names created by EKS managed node groups" + value = module.eks.eks_managed_node_groups_autoscaling_group_names +} + +################################################################################ +# Self Managed Node Group 
+################################################################################ + +output "self_managed_node_groups" { + description = "Map of attribute maps for all self managed node groups created" + value = module.eks.self_managed_node_groups +} + +output "self_managed_node_groups_autoscaling_group_names" { + description = "List of the autoscaling group names created by self-managed node groups" + value = module.eks.self_managed_node_groups_autoscaling_group_names +} + +################################################################################ +# Additional +################################################################################ + +output "aws_auth_configmap_yaml" { + description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" + value = module.eks.aws_auth_configmap_yaml +} diff --git a/examples/outposts/variables.tf b/examples/outposts/variables.tf new file mode 100644 index 0000000000..be8639ed0e --- /dev/null +++ b/examples/outposts/variables.tf @@ -0,0 +1,10 @@ +variable "region" { + description = "The AWS region to deploy into (e.g. 
us-east-1)" + type = string +} + +variable "outpost_instance_type" { + description = "Instance type supported by the Outposts instance" + type = string + default = "m5.large" +} diff --git a/examples/outposts/versions.tf b/examples/outposts/versions.tf new file mode 100644 index 0000000000..49f7eb81c2 --- /dev/null +++ b/examples/outposts/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.34" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index f0a20c2342..f1bb474b54 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -26,14 +26,14 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.32 | +| [aws](#requirement\_aws) | >= 4.34 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.32 | +| [aws](#provider\_aws) | >= 4.34 | ## Modules diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index bd119cf38c..49f7eb81c2 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.32" + version = ">= 4.34" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/main.tf b/main.tf index eba77b9748..f6542fcdd9 100644 --- a/main.tf +++ b/main.tf @@ -5,6 +5,8 @@ locals { create = var.create && var.putin_khuylo cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn) + + create_outposts_local_cluster = length(var.outpost_config) > 0 } 
################################################################################ @@ -29,7 +31,7 @@ resource "aws_eks_cluster" "this" { dynamic "kubernetes_network_config" { # Not valid on Outposts - for_each = var.provision_on_outpost ? [1] : [] + for_each = local.create_outposts_local_cluster ? [] : [1] content { ip_family = var.cluster_ip_family @@ -39,7 +41,7 @@ resource "aws_eks_cluster" "this" { } dynamic "outpost_config" { - for_each = var.provision_on_outpost ? [var.outpost_config] : [] + for_each = local.create_outposts_local_cluster ? [var.outpost_config] : [] content { control_plane_instance_type = outpost_config.value.control_plane_instance_type @@ -49,7 +51,7 @@ resource "aws_eks_cluster" "this" { dynamic "encryption_config" { # Not available on Outposts - for_each = length(var.cluster_encryption_config) > 0 && !var.provision_on_outpost ? [var.cluster_encryption_config] : [] + for_each = length(var.cluster_encryption_config) > 0 && !local.create_outposts_local_cluster ? [var.cluster_encryption_config] : [] content { provider { @@ -109,7 +111,7 @@ module "kms" { source = "terraform-aws-modules/kms/aws" version = "1.1.0" # Note - be mindful of Terraform/provider version compatibility between modules - create = local.create && var.create_kms_key && !var.provision_on_outpost # not valid on Outposts + create = local.create && var.create_kms_key && !local.create_outposts_local_cluster # not valid on Outposts description = coalesce(var.kms_key_description, "${var.cluster_name} cluster encryption key") key_usage = "ENCRYPT_DECRYPT" @@ -259,7 +261,7 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["eks.${local.dns_suffix}"] + identifiers = local.create_outposts_local_cluster ? 
["outposts.eks-local.${local.dns_suffix}"] : ["eks.${local.dns_suffix}"] } } } diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 699d016f11..2619fecd07 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -119,7 +119,7 @@ module "eks_managed_node_group" { | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no | | [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20`. Only valid when `use_custom_launch_template` = `false` | `number` | `null` | no | | [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no | -| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `{}` | no | +| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `any` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | | [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template. 
Only valid when using a custom AMI via `ami_id` | `bool` | `false` | no | | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | @@ -146,7 +146,7 @@ module "eks_managed_node_group" { | [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no | | [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no | | [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no | -| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `{}` | no | +| [license\_specifications](#input\_license\_specifications) | A map of license specifications to associate with | `any` | `{}` | no | | [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | `any` | `{}` | no | | [max\_size](#input\_max\_size) | Maximum number of instances/nodes | `number` | `3` | no | | [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |
{
"http_endpoint": "enabled",
"http_put_response_hop_limit": 2,
"http_tokens": "required"
}
| no | diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 27b79c9576..d2790e1acb 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -200,7 +200,7 @@ variable "credit_specification" { variable "elastic_gpu_specifications" { description = "The elastic GPU to attach to the instance" - type = map(string) + type = any default = {} } @@ -229,8 +229,8 @@ variable "maintenance_options" { } variable "license_specifications" { - description = "A list of license specifications to associate with" - type = map(string) + description = "A map of license specifications to associate with" + type = any default = {} } diff --git a/variables.tf b/variables.tf index 75a8eaa516..fade3d5979 100644 --- a/variables.tf +++ b/variables.tf @@ -92,12 +92,6 @@ variable "cluster_service_ipv6_cidr" { default = null } -variable "provision_on_outpost" { - description = "Determines whether cluster should be provisioned on an AWS Outpost" - type = bool - default = false -} - variable "outpost_config" { description = "Configuration for the AWS Outpost to provision the cluster on" type = any @@ -239,13 +233,13 @@ variable "cloudwatch_log_group_kms_key_id" { ################################################################################ variable "create_cluster_security_group" { - description = "Determines if a security group is created for the cluster or use the existing `cluster_security_group_id`" + description = "Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default" type = bool default = true } variable "cluster_security_group_id" { - description = "Existing security group ID to be attached to the cluster. 
Required if `create_cluster_security_group` = `false`" + description = "Existing security group ID to be attached to the cluster" type = string default = "" } From 2b694828072439bb125ae3db57c78516709a45b7 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Fri, 28 Oct 2022 10:09:17 -0400 Subject: [PATCH 26/33] fix: Updates from testing with Outposts, still need final remote validation hence the git push --- examples/outposts/README.md | 40 +++-- examples/outposts/main.tf | 166 ++++++++++++----- examples/outposts/outputs.tf | 79 --------- examples/outposts/prerequisites/main.tf | 177 +++++++++++++++++++ examples/outposts/prerequisites/outputs.tf | 4 + examples/outposts/prerequisites/variables.tf | 5 + examples/outposts/prerequisites/versions.tf | 10 ++ examples/outposts/variables.tf | 7 +- main.tf | 41 +++-- node_groups.tf | 4 +- 10 files changed, 373 insertions(+), 160 deletions(-) create mode 100644 examples/outposts/prerequisites/main.tf create mode 100644 examples/outposts/prerequisites/outputs.tf create mode 100644 examples/outposts/prerequisites/variables.tf create mode 100644 examples/outposts/prerequisites/versions.tf diff --git a/examples/outposts/README.md b/examples/outposts/README.md index 7e6ebd4928..7de18c21cc 100644 --- a/examples/outposts/README.md +++ b/examples/outposts/README.md @@ -10,10 +10,25 @@ Note: This example requires an an AWS Outpost to provision. To run this example you need to: -1. Copy the `terraform.tfvars.example` to `terraform.tfvars` and fill in the required variables -2. Execute: +1. Deploy the remote host where the cluster will be provisioned from. The remote host is required since only private access is permitted to clusters created on Outposts. If you have access to the network where Outposts are provisioned (VPN, etc.), you can skip this step: ```bash +$ cd prerequisites +$ terraform init +$ terraform plan +$ terraform apply +``` + +2. 
If provisioning using the remote host deployed in step 1, connect to the remote host using SSM. Note, you will need to have the [SSM plugin for the AWS CLI installed](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). You can use the output generated by step 1 to connect: + +```bash +$ aws ssm start-session --region --target +``` + +3. Once connected to the remote host, navigate to the cloned project example directory and deploy the example: + +```bash +$ cd $HOME/terraform-aws-eks/examples/outposts $ terraform init $ terraform plan $ terraform apply @@ -35,27 +50,31 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.34 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules | Name | Source | Version | |------|--------|---------| | [eks](#module\_eks) | ../.. | n/a | -| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 | ## Resources | Name | Type | |------|------| +| [kubernetes_storage_class_v1.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class_v1) | resource | +| [aws_outposts_outpost_instance_types.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outpost_instance_types) | data source | | [aws_outposts_outposts.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outposts) | data source | +| [aws_subnet.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | +| [aws_subnets.lookup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | | [aws_subnets.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | +| 
[aws_vpc.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [outpost\_instance\_type](#input\_outpost\_instance\_type) | Instance type supported by the Outposts instance | `string` | `"m5.large"` | no | -| [region](#input\_region) | The AWS region to deploy into (e.g. us-east-1) | `string` | n/a | yes | +| [region](#input\_region) | The AWS region to deploy into (e.g. us-east-1) | `string` | `"us-west-2"` | no | ## Outputs @@ -64,7 +83,6 @@ Note that this example may create resources which cost money. Run `terraform des | [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | -| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | | [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | | [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | | [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | @@ -72,24 +90,14 @@ Note that this example may create resources which cost money. Run `terraform des | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready | -| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | -| [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | -| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | -| [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | -| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | -| [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key | -| [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key | -| [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key | | [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group | | [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group | -| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | -| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` | | [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created | | [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups | diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf index bee2f31aa1..ea4d5dc64c 100644 --- a/examples/outposts/main.tf +++ b/examples/outposts/main.tf @@ -9,8 +9,7 @@ provider "kubernetes" { exec { api_version = 
"client.authentication.k8s.io/v1beta1" command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] + args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] } } @@ -18,6 +17,9 @@ locals { name = "ex-${basename(path.cwd)}" cluster_version = "1.21" # Required by EKS on Outposts + outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) + instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -32,84 +34,158 @@ locals { module "eks" { source = "../.." - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version + + cluster_endpoint_public_access = false # Not available on Outpost + cluster_endpoint_private_access = true + + vpc_id = data.aws_vpc.this.id + subnet_ids = data.aws_subnets.this.ids outpost_config = { - control_plane_instance_type = var.outpost_instance_type - outpost_arns = [tolist(data.aws_outposts_outposts.this.arns)[0]] + control_plane_instance_type = local.instance_type + outpost_arns = [local.outpost_arn] } - cluster_addons = { - coredns = { - most_recent = true + # Extend cluster security group rules + cluster_security_group_additional_rules = { + ingress_vpc_https = { + description = "Remote host to control plane" + protocol = "tcp" + from_port = 443 + to_port = 443 + type = "ingress" + cidr_blocks = [data.aws_vpc.this.cidr_block] } - kube-proxy = { - most_recent = true + inress_nodes_ephemeral_ports_tcp = { + description = "To node 1025-65535" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "ingress" + source_node_security_group = true } - vpc-cni = { - most_recent = true - service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + 
egress_nodes_ephemeral_ports_tcp = { + description = "To node 1025-65535" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "egress" + source_node_security_group = true } } - # Encryption key - create_kms_key = true - cluster_encryption_config = { - resources = ["secrets"] + # Extend node-to-node security group rules + node_security_group_additional_rules = { + ingress_vpc_all = { + description = "VPC" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "ingress" + cidr_blocks = [data.aws_vpc.this.cidr_block] + } + ingress_self_all = { + description = "Node to node all ports/protocols" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "ingress" + self = true + } + egress_all = { + description = "Node all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } } - kms_key_deletion_window_in_days = 7 - enable_kms_key_rotation = true - - create_cluster_security_group = false - create_node_security_group = false - subnet_ids = [tolist(data.aws_subnets.this.ids)[0]] - - manage_aws_auth_configmap = true - - eks_managed_node_group_defaults = { - instance_types = [var.outpost_instance_type] + self_managed_node_group_defaults = { attach_cluster_primary_security_group = true + + iam_role_additional_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } } - eks_managed_node_groups = { + self_managed_node_groups = { outpost = { name = local.name + + min_size = 2 + max_size = 5 + desired_size = 3 + instance_type = local.instance_type } } + # We need to add the node group IAM role to the aws-auth configmap + create_aws_auth_configmap = true + tags = local.tags } +resource "kubernetes_storage_class_v1" "this" { + metadata { + name = "ebs-sc" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + + storage_provisioner = "ebs.csi.aws.com" + volume_binding_mode = "WaitForFirstConsumer" + 
allow_volume_expansion = true + + parameters = { + type = "gp2" + encrypted = "true" + } +} + ################################################################################ # Supporting Resources ################################################################################ data "aws_outposts_outposts" "this" {} -data "aws_subnets" "this" { +data "aws_outposts_outpost_instance_types" "this" { + arn = local.outpost_arn +} + +# This just grabs the first Outpost and returns its subnets +data "aws_subnets" "lookup" { filter { name = "outpost-arn" - values = [tolist(data.aws_outposts_outposts.this.arns)[0]] + values = [local.outpost_arn] } } -module "vpc_cni_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - version = "~> 5.0" +# This grabs a single subnet to reverse lookup those that belong to same VPC +# This is whats used for the cluster +data "aws_subnet" "this" { + id = element(tolist(data.aws_subnets.lookup.ids), 0) +} - role_name_prefix = "VPC-CNI-IRSA" - attach_vpc_cni_policy = true - vpc_cni_enable_ipv4 = true +# These are subnets for the Outpost and restricted to the same VPC +# This is whats used for the cluster +data "aws_subnets" "this" { + filter { + name = "outpost-arn" + values = [local.outpost_arn] + } - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } + filter { + name = "vpc-id" + values = [data.aws_subnet.this.vpc_id] } +} - tags = local.tags +data "aws_vpc" "this" { + id = data.aws_subnet.this.vpc_id } diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf index 6e31908292..b71fd86379 100644 --- a/examples/outposts/outputs.tf +++ b/examples/outposts/outputs.tf @@ -42,25 +42,6 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } -################################################################################ -# KMS Key 
-################################################################################ - -output "kms_key_arn" { - description = "The Amazon Resource Name (ARN) of the key" - value = module.eks.kms_key_arn -} - -output "kms_key_id" { - description = "The globally unique identifier for the key" - value = module.eks.kms_key_id -} - -output "kms_key_policy" { - description = "The IAM resource policy set on the key" - value = module.eks.kms_key_policy -} - ################################################################################ # Security Group ################################################################################ @@ -89,25 +70,6 @@ output "node_security_group_id" { value = module.eks.node_security_group_id } -################################################################################ -# IRSA -################################################################################ - -output "oidc_provider" { - description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" - value = module.eks.oidc_provider -} - -output "oidc_provider_arn" { - description = "The ARN of the OIDC Provider if `enable_irsa = true`" - value = module.eks.oidc_provider_arn -} - -output "cluster_tls_certificate_sha1_fingerprint" { - description = "The SHA1 fingerprint of the public key of the cluster's certificate" - value = module.eks.cluster_tls_certificate_sha1_fingerprint -} - ################################################################################ # IAM Role ################################################################################ @@ -127,24 +89,6 @@ output "cluster_iam_role_unique_id" { value = module.eks.cluster_iam_role_unique_id } -################################################################################ -# EKS Addons -################################################################################ - -output "cluster_addons" { - description = "Map of attribute maps for all EKS cluster addons enabled" - value = 
module.eks.cluster_addons -} - -################################################################################ -# EKS Identity Provider -################################################################################ - -output "cluster_identity_providers" { - description = "Map of attribute maps for all EKS identity providers enabled" - value = module.eks.cluster_identity_providers -} - ################################################################################ # CloudWatch Log Group ################################################################################ @@ -159,29 +103,6 @@ output "cloudwatch_log_group_arn" { value = module.eks.cloudwatch_log_group_arn } -################################################################################ -# Fargate Profile -################################################################################ - -output "fargate_profiles" { - description = "Map of attribute maps for all EKS Fargate Profiles created" - value = module.eks.fargate_profiles -} - -################################################################################ -# EKS Managed Node Group -################################################################################ - -output "eks_managed_node_groups" { - description = "Map of attribute maps for all EKS managed node groups created" - value = module.eks.eks_managed_node_groups -} - -output "eks_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by EKS managed node groups" - value = module.eks.eks_managed_node_groups_autoscaling_group_names -} - ################################################################################ # Self Managed Node Group ################################################################################ diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf new file mode 100644 index 0000000000..e3f765e120 --- /dev/null +++ b/examples/outposts/prerequisites/main.tf @@ -0,0 
+1,177 @@ +provider "aws" { + region = var.region +} + +locals { + name = "ex-${basename(path.cwd)}" + + terraform_version = "1.3.3" + + outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) + instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Pre-Requisites +################################################################################ + +resource "aws_iam_role" "ec2" { + name_prefix = "${local.name}-bastion" + + # Using admin to be able to provision resources from remote host + managed_policy_arns = ["arn:aws:iam::aws:policy/AdministratorAccess"] + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }, + ] + }) + + tags = local.tags +} + +resource "aws_iam_instance_profile" "ec2" { + name = "${local.name}-bastion" + role = aws_iam_role.ec2.name +} + +data "aws_ssm_parameter" "al2" { + name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2" +} + +module "ssm_bastion_ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + version = "~> 4.0" + + name = "${local.name}-bastion" + + ami = data.aws_ssm_parameter.al2.value + instance_type = local.instance_type + iam_instance_profile = aws_iam_instance_profile.ec2.name + + user_data = <<-EOT + #!/bin/bash + + # Add ssm-user since it won't exist until first login + adduser -m ssm-user + tee /etc/sudoers.d/ssm-agent-users <<'EOF' + # User rules for ssm-user + ssm-user ALL=(ALL) NOPASSWD:ALL + EOF + chmod 440 /etc/sudoers.d/ssm-agent-users + + cd /home/ssm-user + + # Install git to clone repo + yum install git -y + + # Install Terraform + curl -sSO 
https://releases.hashicorp.com/terraform/${local.terraform_version}/terraform_${local.terraform_version}_linux_amd64.zip + sudo unzip -qq terraform_${local.terraform_version}_linux_amd64.zip terraform -d /usr/bin/ + rm terraform_${local.terraform_version}_linux_amd64.zip 2> /dev/null + + # Install kubectl + curl -LO https://dl.k8s.io/release/v1.21.0/bin/linux/amd64/kubectl + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + + # Remove default awscli which is v1 - we want latest v2 + yum remove awscli -y + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + ./aws/install + + # Clone repo + git clone https://github.com/bryantbiggs/terraform-aws-eks.git \ + && cd /home/ssm-user/terraform-aws-eks \ + && git checkout refactor/v19 + + chown -R ssm-user:ssm-user /home/ssm-user/ + EOT + + vpc_security_group_ids = [module.bastion_security_group.security_group_id] + subnet_id = element(data.aws_subnets.this.ids, 0) + + tags = local.tags +} + +module "bastion_security_group" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 4.0" + + name = "${local.name}-bastion" + description = "Security group to allow provisioning ${local.name} EKS local cluster on Outposts" + vpc_id = data.aws_vpc.this.id + + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = data.aws_vpc.this.cidr_block + }, + ] + egress_with_cidr_blocks = [ + { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = "0.0.0.0/0" + }, + ] + + tags = local.tags +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_outposts_outposts" "this" {} + +data "aws_outposts_outpost_instance_types" "this" { + arn = local.outpost_arn +} + +# This just grabs the first Outpost and returns its subnets +data "aws_subnets" "lookup" { + 
filter { + name = "outpost-arn" + values = [local.outpost_arn] + } +} + +# This grabs a single subnet to reverse lookup those that belong to same VPC +# This is whats used for the cluster +data "aws_subnet" "this" { + id = element(tolist(data.aws_subnets.lookup.ids), 0) +} + +# These are subnets for the Outpost and restricted to the same VPC +# This is whats used for the cluster +data "aws_subnets" "this" { + filter { + name = "outpost-arn" + values = [local.outpost_arn] + } + + filter { + name = "vpc-id" + values = [data.aws_subnet.this.vpc_id] + } +} + +data "aws_vpc" "this" { + id = data.aws_subnet.this.vpc_id +} diff --git a/examples/outposts/prerequisites/outputs.tf b/examples/outposts/prerequisites/outputs.tf new file mode 100644 index 0000000000..f2ff81ab70 --- /dev/null +++ b/examples/outposts/prerequisites/outputs.tf @@ -0,0 +1,4 @@ +output "ssm_start_session" { + description = "SSM start session command to connect to remote host created" + value = "aws ssm start-session --region ${var.region} --target ${module.ssm_bastion_ec2.id}" +} diff --git a/examples/outposts/prerequisites/variables.tf b/examples/outposts/prerequisites/variables.tf new file mode 100644 index 0000000000..47945c8501 --- /dev/null +++ b/examples/outposts/prerequisites/variables.tf @@ -0,0 +1,5 @@ +variable "region" { + description = "The AWS region to deploy into (e.g. 
us-east-1)" + type = string + default = "us-west-2" +} diff --git a/examples/outposts/prerequisites/versions.tf b/examples/outposts/prerequisites/versions.tf new file mode 100644 index 0000000000..5f058b4c11 --- /dev/null +++ b/examples/outposts/prerequisites/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.34" + } + } +} diff --git a/examples/outposts/variables.tf b/examples/outposts/variables.tf index be8639ed0e..47945c8501 100644 --- a/examples/outposts/variables.tf +++ b/examples/outposts/variables.tf @@ -1,10 +1,5 @@ variable "region" { description = "The AWS region to deploy into (e.g. us-east-1)" type = string -} - -variable "outpost_instance_type" { - description = "Instance type supported by the Outposts instance" - type = string - default = "m5.large" + default = "us-west-2" } diff --git a/main.tf b/main.tf index f6542fcdd9..652ae9f0fa 100644 --- a/main.tf +++ b/main.tf @@ -6,7 +6,8 @@ locals { cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn) - create_outposts_local_cluster = length(var.outpost_config) > 0 + create_outposts_local_cluster = length(var.outpost_config) > 0 + enable_cluster_encryption_config = length(var.cluster_encryption_config) > 0 && !local.create_outposts_local_cluster } ################################################################################ @@ -26,7 +27,7 @@ resource "aws_eks_cluster" "this" { subnet_ids = coalescelist(var.control_plane_subnet_ids, var.subnet_ids) endpoint_private_access = var.cluster_endpoint_private_access endpoint_public_access = var.cluster_endpoint_public_access - public_access_cidrs = var.cluster_endpoint_public_access_cidrs + public_access_cidrs = var.cluster_endpoint_public_access ? 
var.cluster_endpoint_public_access_cidrs : [] } dynamic "kubernetes_network_config" { @@ -51,7 +52,7 @@ resource "aws_eks_cluster" "this" { dynamic "encryption_config" { # Not available on Outposts - for_each = length(var.cluster_encryption_config) > 0 && !local.create_outposts_local_cluster ? [var.cluster_encryption_config] : [] + for_each = local.enable_cluster_encryption_config ? [var.cluster_encryption_config] : [] content { provider { @@ -218,13 +219,15 @@ resource "aws_security_group_rule" "cluster" { ################################################################################ data "tls_certificate" "this" { - count = local.create && var.enable_irsa ? 1 : 0 + # Not available on outposts + count = local.create && var.enable_irsa && !local.create_outposts_local_cluster ? 1 : 0 url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer } resource "aws_iam_openid_connect_provider" "oidc_provider" { - count = local.create && var.enable_irsa ? 1 : 0 + # Not available on outposts + count = local.create && var.enable_irsa && !local.create_outposts_local_cluster ? 1 : 0 client_id_list = distinct(compact(concat(["sts.${local.dns_suffix}"], var.openid_connect_audiences))) thumbprint_list = concat([data.tls_certificate.this[0].certificates[0].sha1_fingerprint], var.custom_oidc_thumbprints) @@ -261,7 +264,18 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = local.create_outposts_local_cluster ? ["outposts.eks-local.${local.dns_suffix}"] : ["eks.${local.dns_suffix}"] + identifiers = ["eks.${local.dns_suffix}"] + } + + dynamic "principals" { + for_each = local.create_outposts_local_cluster ? 
[1] : [] + + content { + type = "Service" + identifiers = [ + "ec2.${local.dns_suffix}", + ] + } } } } @@ -307,7 +321,7 @@ resource "aws_iam_role" "this" { # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html resource "aws_iam_role_policy_attachment" "this" { for_each = { for k, v in { - AmazonEKSClusterPolicy = "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", + AmazonEKSClusterPolicy = local.create_outposts_local_cluster ? "${local.iam_role_policy_prefix}/AmazonEKSLocalOutpostClusterPolicy" : "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", AmazonEKSVPCResourceController = "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", } : k => v if local.create_iam_role } @@ -324,14 +338,16 @@ resource "aws_iam_role_policy_attachment" "additional" { # Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply` resource "aws_iam_role_policy_attachment" "cluster_encryption" { - count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0 + # Encryption config not available on Outposts + count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0 policy_arn = aws_iam_policy.cluster_encryption[0].arn role = aws_iam_role.this[0].name } resource "aws_iam_policy" "cluster_encryption" { - count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0 + # Encryption config not available on Outposts + count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0 name = var.cluster_encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name name_prefix = var.cluster_encryption_policy_use_name_prefix ? 
local.cluster_encryption_policy_name : null @@ -362,7 +378,8 @@ resource "aws_iam_policy" "cluster_encryption" { ################################################################################ resource "aws_eks_addon" "this" { - for_each = { for k, v in var.cluster_addons : k => v if local.create } + # Not supported on outposts + for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster } cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) @@ -388,7 +405,7 @@ resource "aws_eks_addon" "this" { } data "aws_eks_addon_version" "this" { - for_each = { for k, v in var.cluster_addons : k => v if local.create } + for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster } addon_name = try(each.value.name, each.key) kubernetes_version = coalesce(var.cluster_version, aws_eks_cluster.this[0].version) @@ -401,7 +418,7 @@ data "aws_eks_addon_version" "this" { ################################################################################ resource "aws_eks_identity_provider_config" "this" { - for_each = { for k, v in var.cluster_identity_providers : k => v if local.create } + for_each = { for k, v in var.cluster_identity_providers : k => v if local.create && !local.create_outposts_local_cluster } cluster_name = aws_eks_cluster.this[0].name diff --git a/node_groups.tf b/node_groups.tf index 168152a37e..8e8d4407b7 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -207,7 +207,7 @@ resource "aws_security_group_rule" "node" { module "fargate_profile" { source = "./modules/fargate-profile" - for_each = { for k, v in var.fargate_profiles : k => v if var.create } + for_each = { for k, v in var.fargate_profiles : k => v if var.create && !local.create_outposts_local_cluster } create = try(each.value.create, true) @@ -243,7 +243,7 @@ module "fargate_profile" { module "eks_managed_node_group" { source = "./modules/eks-managed-node-group" - for_each = 
{ for k, v in var.eks_managed_node_groups : k => v if var.create } + for_each = { for k, v in var.eks_managed_node_groups : k => v if var.create && !local.create_outposts_local_cluster } create = try(each.value.create, true) From 15d28369097b2bc080cf290c92223321907ba8e4 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 1 Nov 2022 12:19:23 -0400 Subject: [PATCH 27/33] feat: Update defaults; simplify security group settings and forward compatibility for cluster ID vs name --- .github/images/security_groups.svg | 2 +- README.md | 17 +++-- docs/UPGRADE-19.0.md | 59 +++++++-------- examples/complete/README.md | 3 +- examples/complete/main.tf | 40 ++++------ examples/complete/outputs.tf | 7 +- examples/eks_managed_node_group/README.md | 3 +- examples/eks_managed_node_group/main.tf | 56 +------------- examples/eks_managed_node_group/outputs.tf | 7 +- examples/fargate_profile/README.md | 3 +- examples/fargate_profile/main.tf | 16 +--- examples/fargate_profile/outputs.tf | 7 +- examples/outposts/README.md | 3 +- examples/outposts/main.tf | 49 +------------ examples/outposts/outputs.tf | 7 +- examples/self_managed_node_group/README.md | 1 - examples/self_managed_node_group/main.tf | 54 +------------- main.tf | 25 ++----- node_groups.tf | 85 ++++++++++------------ outputs.tf | 7 +- variables.tf | 20 ++--- 21 files changed, 152 insertions(+), 319 deletions(-) diff --git a/.github/images/security_groups.svg b/.github/images/security_groups.svg index 33fce81d9b..6012962597 100644 --- a/.github/images/security_groups.svg +++ b/.github/images/security_groups.svg @@ -1 +1 @@ - + diff --git a/README.md b/README.md index c3960b48e3..a01b52f879 100644 --- a/README.md +++ b/README.md @@ -83,11 +83,13 @@ module "eks" { cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } @@ -134,7 +136,6 @@ module "eks" { # EKS Managed 
Node Group(s) eks_managed_node_group_defaults = { - disk_size = 50 instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] } @@ -286,7 +287,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no | | [cluster\_addons\_timeouts](#input\_cluster\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | `map(string)` | `{}` | no | -| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator"
]
| no | +| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator",
"controllerManager"
]
| no | | [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `any` |
{
"resources": [
"secrets"
]
}
| no | | [cluster\_encryption\_policy\_description](#input\_cluster\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no | | [cluster\_encryption\_policy\_name](#input\_cluster\_encryption\_policy\_name) | Name to use on cluster encryption policy created | `string` | `null` | no | @@ -349,10 +350,9 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no | | [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no | | [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no | +| [node\_security\_group\_enable\_recommended\_rules](#input\_node\_security\_group\_enable\_recommended\_rules) | Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic | `bool` | `true` | no | | [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no | | [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no | -| [node\_security\_group\_ntp\_ipv4\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv4\_cidr\_block) | IPv4 CIDR block to allow NTP egress. 
Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["169.254.169.123/32"]` | `list(string)` |
[
"169.254.169.123/32"
]
| no | -| [node\_security\_group\_ntp\_ipv6\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv6\_cidr\_block) | IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["fd00:ec2::123/128"]` | `list(string)` |
[
"fd00:ec2::123/128"
]
| no | | [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no | | [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `bool` | `true` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | @@ -379,8 +379,9 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 7a5d5dcc52..6119e0535c 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -18,13 +18,16 @@ Please consult the `examples` directory for reference example configurations. If - Support for setting `preserve` as well as `most_recent` on addons. - `preserve` indicates if you want to preserve the created resources when deleting the EKS add-on - `most_recent` indicates if you want to use the most recent revision of the add-on or the default version (default) +- Support for setting default node security group rules for common access patterns required: + - Egress all for `0.0.0.0/0`/`::/0` + - Ingress from cluster security group for 8443/TCP and 9443/TCP for common applications such as ALB Ingress Controller, Karpenter, OPA Gatekeeper, etc. These are commonly used as webhook ports for validating and mutating webhooks ### Modified - `cluster_security_group_additional_rules` and `node_security_group_additional_rules` have been modified to use `lookup()` instead of `try()` to avoid the well known issue of [unkown values within a `for_each` loop](https://github.com/hashicorp/terraform/issues/4149) +- Default cluster security group rules have removed egress rules for TCP/443 and TCP/10250 to node groups since the cluster primary security group includes a default rule for ALL to `0.0.0.0/0`/`::/0` +- Default node security group rules have removed egress rules have been removed since the default security group settings have egress rule for ALL to `0.0.0.0/0`/`::/0` - `block_device_mappings` previously required a map of maps but has since changed to an array of maps. Users can remove the outer key for each block device mapping and replace the outermost map `{}` with an array `[]`. There are no state changes required for this change. 
-- `node_security_group_ntp_ipv4_cidr_block` previously defaulted to `["0.0.0.0/0"]` and now defaults to `["169.254.169.123/32"]` (Referenc: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) -- `node_security_group_ntp_ipv6_cidr_block` previously defaulted to `["::/0"]` and now defaults to `["fd00:ec2::123/128"]` (Referenc: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) - `create_kms_key` previously defaulted to `false` and now defaults to `true`. Clusters created with this module now default to enabling secret encryption by default with a customer managed KMS key created by this module - `cluster_encryption_config` previously used a type of `list(any)` and now uses a type of `any` -> users can simply remove the outer `[`...`]` brackets on `v19.x` - `cluster_encryption_config` previously defaulted to `[]` and now defaults to `{resources = ["secrets"]}` to encrypt secrets by default @@ -53,6 +56,9 @@ Please consult the `examples` directory for reference example configurations. If 1. Removed variables: + - `node_security_group_ntp_ipv4_cidr_block` - default security group settings have egress rule for ALL to `0.0.0.0/0`/`::/0` + - `node_security_group_ntp_ipv6_cidr_block` - default security group settings have egress rule for ALL to `0.0.0.0/0`/`::/0` + - Self managed node groups: - `create_security_group` - `security_group_name` @@ -82,6 +88,7 @@ Please consult the `examples` directory for reference example configurations. 
If - `outpost_config` for Outposts support - `cluster_addons_timeouts` for setting a common set of timeouts for all addons (unless a specific value is provided within the addon configuration) - `service_ipv6_cidr` for setting the IPv6 CIDR block for the Kubernetes service addresses + - `node_security_group_enable_recommended_rules` for enabling recommended node security group rules for common access patterns - Self managed node groups: - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204) @@ -112,7 +119,7 @@ Please consult the `examples` directory for reference example configurations. If 6. Added outputs: - - N/A + - `cluster_name` - The `cluster_id` currently set by the AWS provider is actually the cluster name, but in the future this will change and there will be a distinction between the `cluster_name` and `cluster_id`. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560) ## Upgrade Migrations @@ -205,38 +212,26 @@ EKS managed node groups on `v18.x` by default create a security group that does subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets - # Extend cluster security group rules - cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - # Extend node-to-node security group rules - node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"] # now the default node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = 
["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } +- ingress_self_ephemeral = { +- description = "Node to node ephemeral ports" +- protocol = "tcp" +- from_port = 0 +- to_port = 0 +- type = "ingress" +- self = true +- } +- egress_all = { +- description = "Node all egress" +- protocol = "-1" +- from_port = 0 +- to_port = 0 +- type = "egress" +- cidr_blocks = ["0.0.0.0/0"] +- ipv6_cidr_blocks = ["::/0"] +- } } # Self Managed Node Group(s) diff --git a/examples/complete/README.md b/examples/complete/README.md index 515961219a..0c690660ca 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -83,8 +83,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 46e2ee26dc..423f2d6c82 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -16,7 +16,7 @@ provider "kubernetes" { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } @@ -82,18 +82,17 @@ module "eks" { # Extend cluster security group rules cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" + ingress_nodes_ephemeral_ports_tcp = { + description = "Nodes on ephemeral ports" protocol = "tcp" from_port = 1025 to_port = 65535 - type = "egress" + type = "ingress" source_node_security_group = true } } # Extend node-to-node security group rules - node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"] node_security_group_additional_rules = { ingress_self_all = { description = "Node to node all ports/protocols" @@ -103,15 +102,6 @@ module "eks" { type = "ingress" self = true } - egress_all = { - description = "Node all egress" - protocol = "-1" - 
from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } } # Self Managed Node Group(s) @@ -224,12 +214,14 @@ module "eks" { } } - # OIDC Identity provider - cluster_identity_providers = { - sts = { - client_id = "sts.amazonaws.com" - } - } + # Create a new cluster where both an identity provider and Fargate profile is created + # will result in conflicts since only one can take place at a time + # # OIDC Identity provider + # cluster_identity_providers = { + # sts = { + # client_id = "sts.amazonaws.com" + # } + # } # aws-auth configmap manage_aws_auth_configmap = true @@ -279,7 +271,7 @@ module "eks_managed_node_group" { source = "../../modules/eks-managed-node-group" name = "separate-eks-mng" - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name cluster_version = module.eks.cluster_version subnet_ids = module.vpc.private_subnets @@ -295,7 +287,7 @@ module "self_managed_node_group" { source = "../../modules/self-managed-node-group" name = "separate-self-mng" - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name cluster_version = module.eks.cluster_version cluster_endpoint = module.eks.cluster_endpoint cluster_auth_base64 = module.eks.cluster_certificate_authority_data @@ -315,7 +307,7 @@ module "fargate_profile" { source = "../../modules/fargate-profile" name = "separate-fargate-profile" - cluster_name = module.eks.cluster_id + cluster_name = module.eks.cluster_name subnet_ids = module.vpc.private_subnets selectors = [{ @@ -403,7 +395,7 @@ resource "aws_security_group" "additional" { ] } - tags = local.tags + tags = merge(local.tags, { Name = "${local.name}-additional" }) } resource "aws_iam_policy" "additional" { diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf index c6b06de812..c53b9099c8 100644 --- a/examples/complete/outputs.tf +++ b/examples/complete/outputs.tf @@ -18,10 +18,15 @@ output "cluster_endpoint" { } output "cluster_id" { 
- description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 1d7ac1b200..5239aee780 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -110,8 +110,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 23b681ac6d..67199f8c73 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -10,7 +10,7 @@ provider "kubernetes" { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } @@ -56,7 +56,6 @@ module "eks" { cluster_addons = { coredns = { - preserve = true most_recent = true } kube-proxy = { @@ -68,61 +67,12 @@ module "eks" { } } - # Encryption key - create_kms_key = true - cluster_encryption_config = { - resources = ["secrets"] - } - kms_key_deletion_window_in_days = 7 - enable_kms_key_rotation = true - - cluster_tags = { - # This should not affect the name of the cluster primary security group - # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 - # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 - Name = local.name - } - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets manage_aws_auth_configmap = true - # Extend cluster security group rules - cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - - # Extend node-to-node security group rules - node_security_group_ntp_ipv6_cidr_block = ["fd00:ec2::123/128"] - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - 
description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - } - eks_managed_node_group_defaults = { ami_type = "AL2_x86_64" instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] @@ -419,7 +369,7 @@ resource "aws_security_group" "additional" { ] } - tags = local.tags + tags = merge(local.tags, { Name = "${local.name}-additional" }) } module "ebs_kms_key" { @@ -550,7 +500,7 @@ resource "aws_security_group" "remote_access" { ipv6_cidr_blocks = ["::/0"] } - tags = local.tags + tags = merge(local.tags, { Name = "${local.name}-remote" }) } resource "aws_iam_policy" "node_additional" { diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf index 6e31908292..816f30368a 100644 --- a/examples/eks_managed_node_group/outputs.tf +++ b/examples/eks_managed_node_group/outputs.tf @@ -18,10 +18,15 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 5f250fdee1..67ce99e59d 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -70,8 +70,9 @@ No inputs. 
| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index ca02d4ac18..f4277719cc 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -11,7 +11,7 @@ provider "helm" { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } } @@ -48,14 +48,6 @@ module "eks" { vpc-cni = {} } - # Encryption key - create_kms_key = true - cluster_encryption_config = { - resources = ["secrets"] - } - kms_key_deletion_window_in_days = 7 - enable_kms_key_rotation = true - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets @@ -117,7 +109,7 @@ module "eks" { ################################################################################ data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_id + name = module.eks.cluster_name } locals { @@ -126,7 +118,7 @@ locals { kind = "Config" current-context = "terraform" clusters = [{ - name = module.eks.cluster_id + name = module.eks.cluster_name cluster = { certificate-authority-data = module.eks.cluster_certificate_authority_data server = module.eks.cluster_endpoint @@ -135,7 +127,7 @@ locals { contexts = [{ name = "terraform" context = { - cluster = module.eks.cluster_id + cluster = module.eks.cluster_name user = "terraform" } }] diff --git a/examples/fargate_profile/outputs.tf b/examples/fargate_profile/outputs.tf index 6e31908292..816f30368a 100644 --- a/examples/fargate_profile/outputs.tf +++ b/examples/fargate_profile/outputs.tf @@ -18,10 +18,15 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The name/id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/outposts/README.md b/examples/outposts/README.md index 7de18c21cc..d2012aa0e5 100644 --- a/examples/outposts/README.md +++ b/examples/outposts/README.md @@ -89,7 +89,8 @@ Note that this example may create resources which cost money. Run `terraform des | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf index ea4d5dc64c..4b98278b9e 100644 --- a/examples/outposts/main.tf +++ b/examples/outposts/main.tf @@ -9,7 +9,7 @@ provider "kubernetes" { exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" - args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] + args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] } } @@ -58,58 +58,13 @@ module "eks" { type = "ingress" cidr_blocks = [data.aws_vpc.this.cidr_block] } - inress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "ingress" - source_node_security_group = true - } - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_vpc_all = { - description = "VPC" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - cidr_blocks = [data.aws_vpc.this.cidr_block] - } - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } } self_managed_node_group_defaults = { attach_cluster_primary_security_group = true iam_role_additional_policies = { - AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } } diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf index 
b71fd86379..ebafe608e8 100644 --- a/examples/outposts/outputs.tf +++ b/examples/outposts/outputs.tf @@ -18,10 +18,15 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index f1bb474b54..e488d9c9dd 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -50,7 +50,6 @@ Note that this example may create resources which cost money. Run `terraform des |------|------| | [aws_ec2_capacity_reservation.targeted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_capacity_reservation) | resource | | [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 1131150d41..3314981712 100644 --- a/examples/self_managed_node_group/main.tf +++ 
b/examples/self_managed_node_group/main.tf @@ -10,7 +10,7 @@ provider "kubernetes" { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id] + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } @@ -45,7 +45,6 @@ module "eks" { cluster_addons = { coredns = { - preserve = true most_recent = true } kube-proxy = { @@ -56,14 +55,6 @@ module "eks" { } } - # Encryption key - create_kms_key = true - cluster_encryption_config = { - resources = ["secrets"] - } - kms_key_deletion_window_in_days = 7 - enable_kms_key_rotation = true - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets @@ -72,39 +63,6 @@ module "eks" { create_aws_auth_configmap = true manage_aws_auth_configmap = true - # Extend cluster security group rules - cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - } - self_managed_node_group_defaults = { # enable discovery of autoscaling groups by cluster-autoscaler autoscaling_group_tags = { @@ -351,15 +309,7 @@ resource "aws_security_group" "additional" { ] } - tags = local.tags -} - -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" - deletion_window_in_days = 7 - enable_key_rotation 
= true - - tags = local.tags + tags = merge(local.tags, { Name = "${local.name}-additional" }) } data "aws_ami" "eks_default" { diff --git a/main.tf b/main.tf index 652ae9f0fa..8342c2e96d 100644 --- a/main.tf +++ b/main.tf @@ -146,7 +146,7 @@ locals { cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id # Do not add rules to node security group if the module is not creating it - cluster_security_group_rules = local.create_node_sg ? { + cluster_security_group_rules = { for k, v in { ingress_nodes_443 = { description = "Node groups to cluster API" protocol = "tcp" @@ -155,23 +155,7 @@ locals { type = "ingress" source_node_security_group = true } - egress_nodes_443 = { - description = "Cluster API to node groups" - protocol = "tcp" - from_port = 443 - to_port = 443 - type = "egress" - source_node_security_group = true - } - egress_nodes_kubelet = { - description = "Cluster API to node kubelets" - protocol = "tcp" - from_port = 10250 - to_port = 10250 - type = "egress" - source_node_security_group = true - } - } : {} + } : k => v if local.create_node_sg } } resource "aws_security_group" "cluster" { @@ -194,7 +178,10 @@ resource "aws_security_group" "cluster" { } resource "aws_security_group_rule" "cluster" { - for_each = { for k, v in merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : k => v if local.create_cluster_sg } + for_each = { for k, v in merge( + local.cluster_security_group_rules, + var.cluster_security_group_additional_rules + ) : k => v if local.create_cluster_sg } # Required security_group_id = aws_security_group.cluster[0].id diff --git a/node_groups.tf b/node_groups.tf index 8e8d4407b7..36f071610c 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -72,14 +72,6 @@ locals { node_security_group_id = local.create_node_sg ? 
aws_security_group.node[0].id : var.node_security_group_id node_security_group_rules = { - egress_cluster_443 = { - description = "Node groups to cluster API" - protocol = "tcp" - from_port = 443 - to_port = 443 - type = "egress" - source_cluster_security_group = true - } ingress_cluster_443 = { description = "Cluster API to node groups" protocol = "tcp" @@ -104,58 +96,51 @@ locals { type = "ingress" self = true } - egress_self_coredns_tcp = { - description = "Node to node CoreDNS" - protocol = "tcp" - from_port = 53 - to_port = 53 - type = "egress" - self = true - } ingress_self_coredns_udp = { - description = "Node to node CoreDNS" + description = "Node to node CoreDNS UDP" protocol = "udp" from_port = 53 to_port = 53 type = "ingress" self = true } - egress_self_coredns_udp = { - description = "Node to node CoreDNS" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" + } + + node_secuirty_group_recommended_rules = { for k, v in { + ingress_nodes_ephemeral = { + description = "Node to node ingress on ephemeral ports" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "ingress" self = true } - egress_https = { - description = "Egress all HTTPS to internet" - protocol = "tcp" - from_port = 443 - to_port = 443 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? ["::/0"] : null + ingress_cluster_8443_webhook = { + description = "Cluster API to node 8443/tcp webhook" + protocol = "tcp" + from_port = 8443 + to_port = 8443 + type = "ingress" + source_cluster_security_group = true } - egress_ntp_tcp = { - description = "Egress NTP/TCP to internet" - protocol = "tcp" - from_port = 123 - to_port = 123 - type = "egress" - cidr_blocks = var.node_security_group_ntp_ipv4_cidr_block - ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? 
var.node_security_group_ntp_ipv6_cidr_block : null + ingress_cluster_9443_webhook = { + description = "Cluster API to node 9443/tcp webhook" + protocol = "tcp" + from_port = 9443 + to_port = 9443 + type = "ingress" + source_cluster_security_group = true } - egress_ntp_udp = { - description = "Egress NTP/UDP to internet" - protocol = "udp" - from_port = 123 - to_port = 123 + egress_all = { + description = "Allow all egress" + protocol = "-1" + from_port = 0 + to_port = 0 type = "egress" - cidr_blocks = var.node_security_group_ntp_ipv4_cidr_block - ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? var.node_security_group_ntp_ipv6_cidr_block : null + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? ["::/0"] : null } - } + } : k => v if var.node_security_group_enable_recommended_rules } } resource "aws_security_group" "node" { @@ -181,7 +166,11 @@ resource "aws_security_group" "node" { } resource "aws_security_group_rule" "node" { - for_each = { for k, v in merge(local.node_security_group_rules, var.node_security_group_additional_rules) : k => v if local.create_node_sg } + for_each = { for k, v in merge( + local.node_security_group_rules, + local.node_secuirty_group_recommended_rules, + var.node_security_group_additional_rules, + ) : k => v if local.create_node_sg } # Required security_group_id = aws_security_group.node[0].id diff --git a/outputs.tf b/outputs.tf index 7fb6d8ce98..9764fccf37 100644 --- a/outputs.tf +++ b/outputs.tf @@ -18,10 +18,15 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The name/id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster" value = try(aws_eks_cluster.this[0].id, "") } +output "cluster_name" { + description = "The name of the EKS cluster" + value = try(aws_eks_cluster.this[0].name, "") +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "") diff --git a/variables.tf b/variables.tf index fade3d5979..ddaa75b9b6 100644 --- a/variables.tf +++ b/variables.tf @@ -35,7 +35,7 @@ variable "cluster_version" { variable "cluster_enabled_log_types" { description = "A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)" type = list(string) - default = ["audit", "api", "authenticator"] + default = ["audit", "api", "authenticator", "controllerManager"] } variable "cluster_additional_security_group_ids" { @@ -330,24 +330,18 @@ variable "node_security_group_additional_rules" { default = {} } +variable "node_security_group_enable_recommended_rules" { + description = "Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic" + type = bool + default = true +} + variable "node_security_group_tags" { description = "A map of additional tags to add to the node security group created" type = map(string) default = {} } -variable "node_security_group_ntp_ipv4_cidr_block" { - description = "IPv4 CIDR block to allow NTP egress. 
Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"169.254.169.123/32\"]`" - type = list(string) - default = ["169.254.169.123/32"] -} - -variable "node_security_group_ntp_ipv6_cidr_block" { - description = "IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"fd00:ec2::123/128\"]`" - type = list(string) - default = ["fd00:ec2::123/128"] -} - ################################################################################ # IRSA ################################################################################ From 63f2d89beb0bf50f45c063e9bb3b95e86b775cd8 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Tue, 8 Nov 2022 10:51:51 -0500 Subject: [PATCH 28/33] chore: Clean up and simplify examples --- examples/eks_managed_node_group/README.md | 3 - examples/eks_managed_node_group/main.tf | 149 --------------------- examples/fargate_profile/README.md | 1 - examples/fargate_profile/main.tf | 8 -- examples/outposts/prerequisites/main.tf | 46 +------ examples/self_managed_node_group/README.md | 3 +- examples/self_managed_node_group/main.tf | 27 +--- 7 files changed, 11 insertions(+), 226 deletions(-) diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 5239aee780..ba1c3a26c6 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -81,10 +81,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| -| [aws_autoscaling_group_tag.cluster_autoscaler_label_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group_tag) | resource | | [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 67199f8c73..90c442e5dc 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -159,13 +159,6 @@ module "eks" { EOT } - # Use existing/external launch template - external_lt = { - create_launch_template = false - launch_template_id = aws_launch_template.external.id - launch_template_version = aws_launch_template.external.default_version - } - # Use a custom AMI custom_ami = { ami_type = "AL2_ARM_64" @@ -247,7 +240,6 @@ module "eks" { description = "EKS managed node group example launch template" ebs_optimized = true - vpc_security_group_ids = [aws_security_group.additional.id] disable_api_termination = false enable_monitoring = true @@ -354,24 +346,6 @@ module "vpc_cni_irsa" { tags = local.tags } -resource "aws_security_group" "additional" { - name_prefix = "${local.name}-additional" - vpc_id = 
module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] - } - - tags = merge(local.tags, { Name = "${local.name}-additional" }) -} - module "ebs_kms_key" { source = "terraform-aws-modules/kms/aws" version = "~> 1.1" @@ -395,80 +369,6 @@ module "ebs_kms_key" { tags = local.tags } -# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx) -# there are several more options one could set but you probably dont need to modify them -# you can take the default and add your custom AMI and/or custom tags -# -# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI, -# then the default user-data for bootstrapping a cluster is merged in the copy. - -resource "aws_launch_template" "external" { - name_prefix = "external-eks-ex-" - description = "EKS managed node group external launch template" - update_default_version = true - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 100 - volume_type = "gp2" - delete_on_termination = true - } - } - - monitoring { - enabled = true - } - - # Disabling due to https://github.com/hashicorp/terraform-provider-aws/issues/23766 - # network_interfaces { - # associate_public_ip_address = false - # delete_on_termination = true - # } - - # if you want to use a custom AMI - # image_id = var.ami_id - - # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then - # you can add more than the minimum code you see in the template, e.g. 
install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345 - # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151) - # user_data = base64encode(data.template_file.launch_template_userdata.rendered) - - tag_specifications { - resource_type = "instance" - - tags = { - Name = "external_lt" - CustomTag = "Instance custom tag" - } - } - - tag_specifications { - resource_type = "volume" - - tags = { - CustomTag = "Volume custom tag" - } - } - - tag_specifications { - resource_type = "network-interface" - - tags = { - CustomTag = "EKS example" - } - } - - tags = { - CustomTag = "Launch template custom tag" - } - - lifecycle { - create_before_destroy = true - } -} - module "key_pair" { source = "terraform-aws-modules/key-pair/aws" version = "~> 2.0" @@ -552,52 +452,3 @@ data "aws_ami" "eks_default_bottlerocket" { values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"] } } - -################################################################################ -# Tags for the ASG to support cluster-autoscaler scale up from 0 -################################################################################ - -locals { - - # We need to lookup K8s taint effect from the AWS API value - taint_effects = { - NO_SCHEDULE = "NoSchedule" - NO_EXECUTE = "NoExecute" - PREFER_NO_SCHEDULE = "PreferNoSchedule" - } - - cluster_autoscaler_label_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for label_name, label_value in coalesce(group.node_group_labels, {}) : "${name}|label|${label_name}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/label/${label_name}", - value = label_value, - } - } - ]...) 
- - cluster_autoscaler_taint_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for taint in coalesce(group.node_group_taints, []) : "${name}|taint|${taint.key}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/taint/${taint.key}" - value = "${taint.value}:${local.taint_effects[taint.effect]}" - } - } - ]...) - - cluster_autoscaler_asg_tags = merge(local.cluster_autoscaler_label_tags, local.cluster_autoscaler_taint_tags) -} - -resource "aws_autoscaling_group_tag" "cluster_autoscaler_label_tags" { - for_each = local.cluster_autoscaler_asg_tags - - autoscaling_group_name = each.value.autoscaling_group - - tag { - key = each.value.key - value = each.value.value - - propagate_at_launch = false - } -} diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 67ce99e59d..2b68322079 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -44,7 +44,6 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| | [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | | [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | | [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index f4277719cc..dbc672cdb0 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -262,14 +262,6 @@ module "vpc" { tags = local.tags } -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" - deletion_window_in_days = 7 - enable_key_rotation = true - - tags = local.tags -} - resource "aws_iam_policy" "additional" { name = "${local.name}-additional" diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf index e3f765e120..0717225ba1 100644 --- a/examples/outposts/prerequisites/main.tf +++ b/examples/outposts/prerequisites/main.tf @@ -7,8 +7,7 @@ locals { terraform_version = "1.3.3" - outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) - instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) + outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) tags = { Example = local.name @@ -21,45 +20,16 @@ locals { # Pre-Requisites ################################################################################ -resource "aws_iam_role" "ec2" { - name_prefix = "${local.name}-bastion" - - # Using admin to be able to provision 
resources from remote host - managed_policy_arns = ["arn:aws:iam::aws:policy/AdministratorAccess"] - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Action = "sts:AssumeRole" - Effect = "Allow" - Principal = { - Service = "ec2.amazonaws.com" - } - }, - ] - }) - - tags = local.tags -} - -resource "aws_iam_instance_profile" "ec2" { - name = "${local.name}-bastion" - role = aws_iam_role.ec2.name -} - -data "aws_ssm_parameter" "al2" { - name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2" -} - module "ssm_bastion_ec2" { source = "terraform-aws-modules/ec2-instance/aws" - version = "~> 4.0" + version = "~> 4.2" name = "${local.name}-bastion" - ami = data.aws_ssm_parameter.al2.value - instance_type = local.instance_type - iam_instance_profile = aws_iam_instance_profile.ec2.name + create_iam_instance_profile = true + iam_role_policies = { + AdministratorAccess = "arn:aws:iam::aws:policy/AdministratorAccess" + } user_data = <<-EOT #!/bin/bash @@ -140,10 +110,6 @@ module "bastion_security_group" { data "aws_outposts_outposts" "this" {} -data "aws_outposts_outpost_instance_types" "this" { - arn = local.outpost_arn -} - # This just grabs the first Outpost and returns its subnets data "aws_subnets" "lookup" { filter { diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index e488d9c9dd..d4fa0bbca4 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -49,8 +49,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| | [aws_ec2_capacity_reservation.targeted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_capacity_reservation) | resource | -| [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 3314981712..9f699d2dca 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -200,9 +200,8 @@ module "eks" { launch_template_use_name_prefix = true launch_template_description = "Self managed node group example launch template" - ebs_optimized = true - vpc_security_group_ids = [aws_security_group.additional.id] - enable_monitoring = true + ebs_optimized = true + enable_monitoring = true block_device_mappings = { xvda = { @@ -241,7 +240,7 @@ module "eks" { } iam_role_additional_policies = { AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - additional = aws_iam_policy.node_additional.arn + additional = aws_iam_policy.additional.arn } timeouts = { @@ -294,24 +293,6 @@ module "vpc" { tags = local.tags } -resource "aws_security_group" "additional" { - 
name_prefix = "${local.name}-additional" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] - } - - tags = merge(local.tags, { Name = "${local.name}-additional" }) -} - data "aws_ami" "eks_default" { most_recent = true owners = ["amazon"] @@ -373,7 +354,7 @@ resource "aws_ec2_capacity_reservation" "targeted" { instance_match_criteria = "targeted" } -resource "aws_iam_policy" "node_additional" { +resource "aws_iam_policy" "additional" { name = "${local.name}-additional" description = "Example usage of node additional policy" From 43e57dda3acc788f53fbf080ae46437cb2e25eb7 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 3 Dec 2022 09:41:20 -0500 Subject: [PATCH 29/33] Update main.tf Co-authored-by: Anton Babenko --- main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.tf b/main.tf index 8342c2e96d..1c3800b6b8 100644 --- a/main.tf +++ b/main.tf @@ -86,7 +86,7 @@ resource "aws_ec2_tag" "cluster_primary_security_group" { # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 for_each = { for k, v in merge(var.tags, var.cluster_tags) : - k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags + k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags && v != null } resource_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id From 0ed15dc1419767f6629be34140b50d8161adf066 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Sat, 3 Dec 2022 10:41:08 -0500 Subject: [PATCH 30/33] chore: Update AWS provider to support changes required by Outposts --- README.md | 7 ++++--- examples/complete/README.md | 4 ++-- examples/complete/versions.tf | 2 +- examples/eks_managed_node_group/README.md | 4 ++-- examples/eks_managed_node_group/versions.tf | 2 +- 
examples/fargate_profile/README.md | 4 ++-- examples/fargate_profile/main.tf | 5 +++-- examples/fargate_profile/versions.tf | 2 +- examples/karpenter/README.md | 4 ++-- examples/karpenter/versions.tf | 2 +- examples/outposts/README.md | 4 ++-- examples/outposts/main.tf | 3 ++- examples/outposts/versions.tf | 2 +- examples/self_managed_node_group/README.md | 4 ++-- examples/self_managed_node_group/versions.tf | 2 +- modules/eks-managed-node-group/README.md | 4 ++-- modules/eks-managed-node-group/versions.tf | 2 +- modules/fargate-profile/README.md | 4 ++-- modules/fargate-profile/versions.tf | 2 +- modules/karpenter/README.md | 6 +++--- modules/karpenter/versions.tf | 4 ++-- modules/self-managed-node-group/README.md | 4 ++-- modules/self-managed-node-group/versions.tf | 2 +- variables.tf | 2 +- versions.tf | 2 +- 25 files changed, 43 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 0ea0b38b65..0e86e5103b 100644 --- a/README.md +++ b/README.md @@ -205,6 +205,7 @@ module "eks" { - [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations - [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups - [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) +- [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for intelligent data plane management - [Outposts](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/outposts): EKS local cluster provisioned on [AWS 
Outposts](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html) - [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data @@ -222,7 +223,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -230,7 +231,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | | [kubernetes](#provider\_kubernetes) | >= 2.10 | | [tls](#provider\_tls) | >= 3.0 | @@ -288,7 +289,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no | | [cluster\_addons\_timeouts](#input\_cluster\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | `map(string)` | `{}` | no | -| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. 
For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator",
"controllerManager"
]
| no | +| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
[
"audit",
"api",
"authenticator"
]
| no | | [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `any` |
{
"resources": [
"secrets"
]
}
| no | | [cluster\_encryption\_policy\_description](#input\_cluster\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no | | [cluster\_encryption\_policy\_name](#input\_cluster\_encryption\_policy\_name) | Name to use on cluster encryption policy created | `string` | `null` | no | diff --git a/examples/complete/README.md b/examples/complete/README.md index 0c690660ca..c06d87b4b3 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -34,14 +34,14 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 49f7eb81c2..f2f8625d4b 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index ba1c3a26c6..0cdcea628a 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -58,14 +58,14 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index 49f7eb81c2..f2f8625d4b 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 2b68322079..07b2280f9d 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -20,7 +20,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [helm](#requirement\_helm) | >= 2.7 | | [null](#requirement\_null) | >= 3.0 | @@ -28,7 +28,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | | [helm](#provider\_helm) | >= 2.7 | | [null](#provider\_null) | >= 3.0 | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 0583a91193..8eb0c934fe 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -40,8 +40,9 @@ locals { module "eks" { source = "../.." 
- cluster_name = local.name - cluster_version = local.cluster_version + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true cluster_addons = { kube-proxy = {} diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index bc41fee29b..69ef526bd9 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } helm = { source = "hashicorp/helm" diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index 2044038103..c4c5be5264 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -52,7 +52,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [helm](#requirement\_helm) | >= 2.7 | | [kubectl](#requirement\_kubectl) | >= 1.14 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | @@ -62,7 +62,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | | [helm](#provider\_helm) | >= 2.7 | | [kubectl](#provider\_kubectl) | >= 1.14 | | [null](#provider\_null) | >= 3.0 | diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf index 173122f72c..7ae5250dce 100644 --- a/examples/karpenter/versions.tf +++ b/examples/karpenter/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/outposts/README.md b/examples/outposts/README.md index 619141ecdb..7f970bdccb 100644 --- a/examples/outposts/README.md +++ b/examples/outposts/README.md @@ -42,14 +42,14 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | | [kubernetes](#provider\_kubernetes) | >= 2.10 | ## Modules diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf index 4b98278b9e..9e4d34bfdd 100644 --- a/examples/outposts/main.tf +++ b/examples/outposts/main.tf @@ -9,7 +9,8 @@ provider "kubernetes" { exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" - args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] + # Note: `cluster_id` is used with Outposts for auth + args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] } } diff --git a/examples/outposts/versions.tf b/examples/outposts/versions.tf index 49f7eb81c2..f2f8625d4b 100644 --- a/examples/outposts/versions.tf +++ 
b/examples/outposts/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index a819d978f8..330341b39c 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -26,14 +26,14 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index 49f7eb81c2..f2f8625d4b 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index 415361a7e6..d4d4cc9b40 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -65,13 +65,13 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/modules/eks-managed-node-group/versions.tf 
b/modules/eks-managed-node-group/versions.tf index 5f058b4c11..325eee94e1 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index 23aa059b96..4ed9a6fd2d 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -29,13 +29,13 @@ module "fargate_profile" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index 5f058b4c11..325eee94e1 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } } } diff --git a/modules/karpenter/README.md b/modules/karpenter/README.md index 1797c6d2e8..71f4ff9455 100644 --- a/modules/karpenter/README.md +++ b/modules/karpenter/README.md @@ -99,14 +99,14 @@ module "karpenter" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/modules/karpenter/versions.tf b/modules/karpenter/versions.tf index 22e8d7265f..325eee94e1 100644 --- a/modules/karpenter/versions.tf +++ b/modules/karpenter/versions.tf @@ -1,10 +1,10 @@ terraform { - 
required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } } } diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 8426e810bc..051aa4f4eb 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -43,13 +43,13 @@ module "self_managed_node_group" { | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.34 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.34 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index 5f058b4c11..325eee94e1 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } } } diff --git a/variables.tf b/variables.tf index dfd5c27ded..d0047ff1c0 100644 --- a/variables.tf +++ b/variables.tf @@ -35,7 +35,7 @@ variable "cluster_version" { variable "cluster_enabled_log_types" { description = "A list of the desired control plane logs to enable. 
For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)" type = list(string) - default = ["audit", "api", "authenticator", "controllerManager"] + default = ["audit", "api", "authenticator"] } variable "cluster_additional_security_group_ids" { diff --git a/versions.tf b/versions.tf index b63c1c5946..fdc407c2ed 100644 --- a/versions.tf +++ b/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 4.45" } tls = { source = "hashicorp/tls" From f6fb069b108047b364229bd75078481a9cfc3b2a Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Mon, 5 Dec 2022 13:04:23 -0500 Subject: [PATCH 31/33] fix: Correct `cluster_id` output value for recent provider changes --- README.md | 2 +- docs/UPGRADE-19.0.md | 5 ++- examples/complete/README.md | 2 +- examples/complete/outputs.tf | 2 +- examples/eks_managed_node_group/README.md | 2 +- examples/eks_managed_node_group/outputs.tf | 2 +- examples/fargate_profile/README.md | 2 +- examples/fargate_profile/outputs.tf | 2 +- examples/karpenter/README.md | 2 +- examples/karpenter/outputs.tf | 2 +- examples/outposts/README.md | 2 +- examples/outposts/outputs.tf | 2 +- examples/self_managed_node_group/README.md | 2 +- examples/self_managed_node_group/outputs.tf | 2 +- modules/_user_data/outputs.tf | 2 +- modules/eks-managed-node-group/outputs.tf | 20 ++++----- modules/fargate-profile/outputs.tf | 12 +++--- modules/self-managed-node-group/outputs.tf | 44 ++++++++++---------- outputs.tf | 46 ++++++++++----------- 19 files changed, 78 insertions(+), 77 deletions(-) diff --git a/README.md b/README.md index 0e86e5103b..fb7dd4845f 100644 --- a/README.md +++ b/README.md @@ -381,7 +381,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 6119e0535c..4a59716b11 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -4,7 +4,8 @@ Please consult the `examples` directory for reference example configurations. If ## List of backwards incompatible changes -- Minimum supported version of Terraform AWS provider updated to v4.34 to support latest features provided via the resources utilized. +- The `cluster_id` output used to output the name of the cluster. This is due to the fact that the cluster name is a unique constraint and therefore it's set as the unique identifier within Terraform's state map. However, starting with local EKS clusters created on Outposts, there is now an attribute returned from the `aws eks create-cluster` API named `id`. The `cluster_id` has been updated to return this value which means that for current, standard EKS clusters created in the AWS cloud, no value will be returned (at the time of this writing) for `cluster_id` and only local EKS clusters on Outposts will return a value that looks like a UUID/GUID. 
Users should switch all instances of `cluster_id` to use `cluster_name` before upgrading to v19. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560) +- Minimum supported version of Terraform AWS provider updated to v4.45 to support latest features provided via the resources utilized. - Minimum supported version of Terraform updated to v1.0 - Individual security group created per EKS managed node group or self managed node group has been removed. This configuration went mostly un-used and would often cause confusion ("Why is there an empty security group attached to my nodes?"). This functionality can easily be replicated by user's providing one or more externally created security groups to attach to nodes launched from the node group. - Previously, `var.iam_role_additional_policies` (one for each of the following: cluster IAM role, EKS managed node group IAM role, self-managed node group IAM role, and Fargate Profile IAM role) accepted a list of strings. This worked well for policies that already existed but failed for policies being created at the same time as the cluster due to the well known issue of unkown values used in a `for_each` loop. To rectify this issue in `v19.x`, two changes were made: @@ -115,7 +116,7 @@ Please consult the `examples` directory for reference example configurations. If 5. Renamed outputs: - - N/A + - `cluster_id` is not renamed but the value it returns is now different. For standard EKS clusters created in the AWS cloud, the value returned at the time of this writing is `null`/empty. For local EKS clusters created on Outposts, the value returned will look like a UUID/GUID. Users should switch all instances of `cluster_id` to use `cluster_name` before upgrading to v19. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560) 6. 
Added outputs: diff --git a/examples/complete/README.md b/examples/complete/README.md index c06d87b4b3..6f5edbdb41 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -83,7 +83,7 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf index c53b9099c8..c1020f3333 100644 --- a/examples/complete/outputs.tf +++ b/examples/complete/outputs.tf @@ -18,7 +18,7 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The ID of the EKS cluster" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 0cdcea628a..42fb41e82c 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -107,7 +107,7 @@ No inputs. 
| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf index 816f30368a..43334ecc0a 100644 --- a/examples/eks_managed_node_group/outputs.tf +++ b/examples/eks_managed_node_group/outputs.tf @@ -18,7 +18,7 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The ID of the EKS cluster" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 07b2280f9d..71a9ee7ad0 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -69,7 +69,7 @@ No inputs. 
| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/examples/fargate_profile/outputs.tf b/examples/fargate_profile/outputs.tf index 816f30368a..43334ecc0a 100644 --- a/examples/fargate_profile/outputs.tf +++ b/examples/fargate_profile/outputs.tf @@ -18,7 +18,7 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The ID of the EKS cluster" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index c4c5be5264..03bf5e8e0d 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -108,7 +108,7 @@ No inputs. 
| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf index a1463c10cc..f0ad50bd6a 100644 --- a/examples/karpenter/outputs.tf +++ b/examples/karpenter/outputs.tf @@ -18,7 +18,7 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The ID of the EKS cluster" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } diff --git a/examples/outposts/README.md b/examples/outposts/README.md index 7f970bdccb..20b60fd4e9 100644 --- a/examples/outposts/README.md +++ b/examples/outposts/README.md @@ -90,7 +90,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf index 816f30368a..43334ecc0a 100644 --- a/examples/outposts/outputs.tf +++ b/examples/outposts/outputs.tf @@ -18,7 +18,7 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The ID of the EKS cluster" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index 330341b39c..97deaa59ab 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -73,7 +73,7 @@ No inputs. 
| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | diff --git a/examples/self_managed_node_group/outputs.tf b/examples/self_managed_node_group/outputs.tf index 816f30368a..43334ecc0a 100644 --- a/examples/self_managed_node_group/outputs.tf +++ b/examples/self_managed_node_group/outputs.tf @@ -18,7 +18,7 @@ output "cluster_endpoint" { } output "cluster_id" { - description = "The ID of the EKS cluster" + description = "The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } diff --git a/modules/_user_data/outputs.tf b/modules/_user_data/outputs.tf index c2a569b05b..075801b233 100644 --- a/modules/_user_data/outputs.tf +++ b/modules/_user_data/outputs.tf @@ -1,4 +1,4 @@ output "user_data" { description = "Base64 encoded user data rendered for the provided inputs" - value = try(local.platform[var.platform].user_data, "") + value = try(local.platform[var.platform].user_data, null) } diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf index c9bffb3ec9..58dbe9646f 100644 --- a/modules/eks-managed-node-group/outputs.tf +++ b/modules/eks-managed-node-group/outputs.tf @@ -4,22 +4,22 @@ output "launch_template_id" { description = "The ID of the launch template" - value = try(aws_launch_template.this[0].id, "") + value = try(aws_launch_template.this[0].id, null) } output "launch_template_arn" { description = "The ARN of the launch template" - value = try(aws_launch_template.this[0].arn, "") + value = try(aws_launch_template.this[0].arn, null) } output "launch_template_latest_version" { description = "The latest version of the launch template" - value = try(aws_launch_template.this[0].latest_version, "") + value = try(aws_launch_template.this[0].latest_version, null) } output "launch_template_name" { description = "The name of the launch template" - value = try(aws_launch_template.this[0].name, "") + value = try(aws_launch_template.this[0].name, null) } ################################################################################ @@ -28,17 +28,17 @@ output "launch_template_name" { output "node_group_arn" { description = "Amazon Resource Name (ARN) of the EKS Node Group" - value = try(aws_eks_node_group.this[0].arn, "") + value = try(aws_eks_node_group.this[0].arn, null) } output "node_group_id" { description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)" - 
value = try(aws_eks_node_group.this[0].id, "") + value = try(aws_eks_node_group.this[0].id, null) } output "node_group_resources" { description = "List of objects containing information about underlying resources" - value = try(aws_eks_node_group.this[0].resources, "") + value = try(aws_eks_node_group.this[0].resources, null) } output "node_group_autoscaling_group_names" { @@ -48,7 +48,7 @@ output "node_group_autoscaling_group_names" { output "node_group_status" { description = "Status of the EKS Node Group" - value = try(aws_eks_node_group.this[0].arn, "") + value = try(aws_eks_node_group.this[0].arn, null) } output "node_group_labels" { @@ -67,7 +67,7 @@ output "node_group_taints" { output "iam_role_name" { description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "iam_role_arn" { @@ -77,5 +77,5 @@ output "iam_role_arn" { output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } diff --git a/modules/fargate-profile/outputs.tf b/modules/fargate-profile/outputs.tf index c8b663e80e..96763bfb1f 100644 --- a/modules/fargate-profile/outputs.tf +++ b/modules/fargate-profile/outputs.tf @@ -4,7 +4,7 @@ output "iam_role_name" { description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "iam_role_arn" { @@ -14,7 +14,7 @@ output "iam_role_arn" { output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } ################################################################################ @@ -23,20 +23,20 @@ output "iam_role_unique_id" { output "fargate_profile_arn" { description = "Amazon Resource Name (ARN) of the EKS Fargate Profile" - 
value = try(aws_eks_fargate_profile.this[0].arn, "") + value = try(aws_eks_fargate_profile.this[0].arn, null) } output "fargate_profile_id" { description = "EKS Cluster name and EKS Fargate Profile name separated by a colon (`:`)" - value = try(aws_eks_fargate_profile.this[0].id, "") + value = try(aws_eks_fargate_profile.this[0].id, null) } output "fargate_profile_status" { description = "Status of the EKS Fargate Profile" - value = try(aws_eks_fargate_profile.this[0].status, "") + value = try(aws_eks_fargate_profile.this[0].status, null) } output "fargate_profile_pod_execution_role_arn" { description = "Amazon Resource Name (ARN) of the EKS Fargate Profile Pod execution role ARN" - value = try(aws_eks_fargate_profile.this[0].pod_execution_role_arn, "") + value = try(aws_eks_fargate_profile.this[0].pod_execution_role_arn, null) } diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf index f10816b522..c9c9b75254 100644 --- a/modules/self-managed-node-group/outputs.tf +++ b/modules/self-managed-node-group/outputs.tf @@ -4,22 +4,22 @@ output "launch_template_id" { description = "The ID of the launch template" - value = try(aws_launch_template.this[0].id, "") + value = try(aws_launch_template.this[0].id, null) } output "launch_template_arn" { description = "The ARN of the launch template" - value = try(aws_launch_template.this[0].arn, "") + value = try(aws_launch_template.this[0].arn, null) } output "launch_template_latest_version" { description = "The latest version of the launch template" - value = try(aws_launch_template.this[0].latest_version, "") + value = try(aws_launch_template.this[0].latest_version, null) } output "launch_template_name" { description = "The name of the launch template" - value = try(aws_launch_template.this[0].name, "") + value = try(aws_launch_template.this[0].name, null) } ################################################################################ @@ -28,57 +28,57 @@ output 
"launch_template_name" { output "autoscaling_group_arn" { description = "The ARN for this autoscaling group" - value = try(aws_autoscaling_group.this[0].arn, "") + value = try(aws_autoscaling_group.this[0].arn, null) } output "autoscaling_group_id" { description = "The autoscaling group id" - value = try(aws_autoscaling_group.this[0].id, "") + value = try(aws_autoscaling_group.this[0].id, null) } output "autoscaling_group_name" { description = "The autoscaling group name" - value = try(aws_autoscaling_group.this[0].name, "") + value = try(aws_autoscaling_group.this[0].name, null) } output "autoscaling_group_min_size" { description = "The minimum size of the autoscaling group" - value = try(aws_autoscaling_group.this[0].min_size, "") + value = try(aws_autoscaling_group.this[0].min_size, null) } output "autoscaling_group_max_size" { description = "The maximum size of the autoscaling group" - value = try(aws_autoscaling_group.this[0].max_size, "") + value = try(aws_autoscaling_group.this[0].max_size, null) } output "autoscaling_group_desired_capacity" { description = "The number of Amazon EC2 instances that should be running in the group" - value = try(aws_autoscaling_group.this[0].desired_capacity, "") + value = try(aws_autoscaling_group.this[0].desired_capacity, null) } output "autoscaling_group_default_cooldown" { description = "Time between a scaling activity and the succeeding scaling activity" - value = try(aws_autoscaling_group.this[0].default_cooldown, "") + value = try(aws_autoscaling_group.this[0].default_cooldown, null) } output "autoscaling_group_health_check_grace_period" { description = "Time after instance comes into service before checking health" - value = try(aws_autoscaling_group.this[0].health_check_grace_period, "") + value = try(aws_autoscaling_group.this[0].health_check_grace_period, null) } output "autoscaling_group_health_check_type" { description = "EC2 or ELB. 
Controls how health checking is done" - value = try(aws_autoscaling_group.this[0].health_check_type, "") + value = try(aws_autoscaling_group.this[0].health_check_type, null) } output "autoscaling_group_availability_zones" { description = "The availability zones of the autoscaling group" - value = try(aws_autoscaling_group.this[0].availability_zones, "") + value = try(aws_autoscaling_group.this[0].availability_zones, null) } output "autoscaling_group_vpc_zone_identifier" { description = "The VPC zone identifier" - value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "") + value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, null) } ################################################################################ @@ -96,17 +96,17 @@ output "autoscaling_group_schedule_arns" { output "iam_role_name" { description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "iam_role_arn" { description = "The Amazon Resource Name (ARN) specifying the IAM role" - value = try(aws_iam_role.this[0].arn, "") + value = try(aws_iam_role.this[0].arn, null) } output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } ################################################################################ @@ -120,12 +120,12 @@ output "iam_instance_profile_arn" { output "iam_instance_profile_id" { description = "Instance profile's ID" - value = try(aws_iam_instance_profile.this[0].id, "") + value = try(aws_iam_instance_profile.this[0].id, null) } output "iam_instance_profile_unique" { description = "Stable and unique string identifying the IAM instance profile" - value = try(aws_iam_instance_profile.this[0].unique_id, "") + value = try(aws_iam_instance_profile.this[0].unique_id, null) } 
################################################################################ @@ -139,10 +139,10 @@ output "platform" { output "image_id" { description = "ID of the image" - value = try(aws_launch_template.this[0].image_id, "") + value = try(aws_launch_template.this[0].image_id, null) } output "user_data" { description = "Base64 encoded user data" - value = try(module.user_data.user_data, "") + value = try(module.user_data.user_data, null) } diff --git a/outputs.tf b/outputs.tf index 9764fccf37..dec0540174 100644 --- a/outputs.tf +++ b/outputs.tf @@ -4,52 +4,52 @@ output "cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster" - value = try(aws_eks_cluster.this[0].arn, "") + value = try(aws_eks_cluster.this[0].arn, null) } output "cluster_certificate_authority_data" { description = "Base64 encoded certificate data required to communicate with the cluster" - value = try(aws_eks_cluster.this[0].certificate_authority[0].data, "") + value = try(aws_eks_cluster.this[0].certificate_authority[0].data, null) } output "cluster_endpoint" { description = "Endpoint for your Kubernetes API server" - value = try(aws_eks_cluster.this[0].endpoint, "") + value = try(aws_eks_cluster.this[0].endpoint, null) } output "cluster_id" { - description = "The ID of the EKS cluster" - value = try(aws_eks_cluster.this[0].id, "") + description = "The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts" + value = try(aws_eks_cluster.this[0].cluster_id, null) } output "cluster_name" { description = "The name of the EKS cluster" - value = try(aws_eks_cluster.this[0].name, "") + value = try(aws_eks_cluster.this[0].name, null) } output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "") + value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null) } output "cluster_version" { description = "The Kubernetes version for the cluster" - value = try(aws_eks_cluster.this[0].version, "") + value = try(aws_eks_cluster.this[0].version, null) } output "cluster_platform_version" { description = "Platform version for the cluster" - value = try(aws_eks_cluster.this[0].platform_version, "") + value = try(aws_eks_cluster.this[0].platform_version, null) } output "cluster_status" { description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" - value = try(aws_eks_cluster.this[0].status, "") + value = try(aws_eks_cluster.this[0].status, null) } output "cluster_primary_security_group_id" { description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console" - value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "") + value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, null) } ################################################################################ @@ -77,12 +77,12 @@ output "kms_key_policy" { output "cluster_security_group_arn" { description = "Amazon Resource Name (ARN) of the cluster security group" - value = try(aws_security_group.cluster[0].arn, "") + value = try(aws_security_group.cluster[0].arn, null) } output "cluster_security_group_id" { description = "ID of the cluster security group" - value = try(aws_security_group.cluster[0].id, "") + value = try(aws_security_group.cluster[0].id, null) } ################################################################################ @@ -91,12 +91,12 @@ output "cluster_security_group_id" { output "node_security_group_arn" { description = "Amazon Resource Name (ARN) of the node shared security group" - value = try(aws_security_group.node[0].arn, "") + value = try(aws_security_group.node[0].arn, null) } output "node_security_group_id" { description = "ID of the node shared security group" - value = try(aws_security_group.node[0].id, "") + value = try(aws_security_group.node[0].id, null) } ################################################################################ @@ -105,17 +105,17 @@ output "node_security_group_id" { output "oidc_provider" { description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" - value = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", ""), "") + value = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", null), null) } output "oidc_provider_arn" { description = "The ARN of the OIDC Provider if `enable_irsa = true`" - value = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, "") + value = 
try(aws_iam_openid_connect_provider.oidc_provider[0].arn, null) } output "cluster_tls_certificate_sha1_fingerprint" { description = "The SHA1 fingerprint of the public key of the cluster's certificate" - value = try(data.tls_certificate.this[0].certificates[0].sha1_fingerprint, "") + value = try(data.tls_certificate.this[0].certificates[0].sha1_fingerprint, null) } ################################################################################ @@ -124,17 +124,17 @@ output "cluster_tls_certificate_sha1_fingerprint" { output "cluster_iam_role_name" { description = "IAM role name of the EKS cluster" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "cluster_iam_role_arn" { description = "IAM role ARN of the EKS cluster" - value = try(aws_iam_role.this[0].arn, "") + value = try(aws_iam_role.this[0].arn, null) } output "cluster_iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } ################################################################################ @@ -161,12 +161,12 @@ output "cluster_identity_providers" { output "cloudwatch_log_group_name" { description = "Name of cloudwatch log group created" - value = try(aws_cloudwatch_log_group.this[0].name, "") + value = try(aws_cloudwatch_log_group.this[0].name, null) } output "cloudwatch_log_group_arn" { description = "Arn of cloudwatch log group created" - value = try(aws_cloudwatch_log_group.this[0].arn, "") + value = try(aws_cloudwatch_log_group.this[0].arn, null) } ################################################################################ From 90b6614ba97835ae3b136ef85f97a181797f3de6 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Mon, 5 Dec 2022 14:10:23 -0500 Subject: [PATCH 32/33] fix: Updates from testing on Outpost lab --- examples/outposts/main.tf | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 
deletions(-) diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf index 9e4d34bfdd..50cbc9586d 100644 --- a/examples/outposts/main.tf +++ b/examples/outposts/main.tf @@ -9,7 +9,7 @@ provider "kubernetes" { exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" - # Note: `cluster_id` is used with Outposts for auth + # Note: `cluster_id` is used with Outposts for auth args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] } } @@ -49,6 +49,9 @@ module "eks" { outpost_arns = [local.outpost_arn] } + # Local clusters will automatically add the node group IAM role to the aws-auth configmap + manage_aws_auth_configmap = true + # Extend cluster security group rules cluster_security_group_additional_rules = { ingress_vpc_https = { @@ -77,12 +80,14 @@ module "eks" { max_size = 5 desired_size = 3 instance_type = local.instance_type + + # Additional information is required to join local clusters to EKS + bootstrap_extra_args = <<-EOT + --enable-local-outpost true --cluster-id ${module.eks.cluster_id} --container-runtime containerd + EOT } } - # We need to add the node group IAM role to the aws-auth configmap - create_aws_auth_configmap = true - tags = local.tags } From eff1df79108c326c5188af61426f6099b91d673a Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Mon, 5 Dec 2022 16:21:00 -0500 Subject: [PATCH 33/33] fix: Correct Karpenter example for auth, update Karpenter module for rules name change --- docs/UPGRADE-19.0.md | 1 + examples/karpenter/README.md | 2 ++ examples/karpenter/main.tf | 18 ++++++++++++++---- examples/outposts/prerequisites/main.tf | 11 +++++++++-- modules/karpenter/main.tf | 7 +++++-- 5 files changed, 31 insertions(+), 8 deletions(-) diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 4a59716b11..429a6a8cbd 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -11,6 +11,7 @@ Please consult the `examples` directory for reference example configurations. 
If - Previously, `var.iam_role_additional_policies` (one for each of the following: cluster IAM role, EKS managed node group IAM role, self-managed node group IAM role, and Fargate Profile IAM role) accepted a list of strings. This worked well for policies that already existed but failed for policies being created at the same time as the cluster due to the well known issue of unknown values used in a `for_each` loop. To rectify this issue in `v19.x`, two changes were made: 1. `var.iam_role_additional_policies` was changed from type `list(string)` to type `map(string)` -> this is a breaking change. More information on managing this change can be found below, under `Terraform State Moves` 2. The logic used in the root module for this variable was changed to replace the use of `try()` with `lookup()`. More details on why can be found [here](https://github.com/clowdhaus/terraform-for-each-unknown) +- The cluster name has been removed from the Karpenter module event rule names. Due to the use of long cluster names appending to the provided naming scheme, the cluster name has moved to a `ClusterName` tag and the event rule name is now a prefix. This guarantees that users can have multiple instances of Karpenter with their respective event rules/SQS queue without name collisions, while also still being able to identify which queues and event rules belong to which cluster. ## Additional changes diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index 03bf5e8e0d..f5d4e2dcc3 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -63,6 +63,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.45 | +| [aws.virginia](#provider\_aws.virginia) | >= 4.45 | | [helm](#provider\_helm) | >= 2.7 | | [kubectl](#provider\_kubectl) | >= 1.14 | | [null](#provider\_null) | >= 3.0 | @@ -87,6 +88,7 @@ Note that this example may create resources which cost money. Run `terraform des | [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_ecrpublic_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecrpublic_authorization_token) | data source | | [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | | [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index 5245d39d88..0a8725e466 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -2,6 +2,11 @@ provider "aws" { region = local.region } +provider "aws" { + region = "us-east-1" + alias = "virginia" +} + provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) @@ -43,6 +48,9 @@ provider "kubectl" { } data "aws_availability_zones" "available" {} +data "aws_ecrpublic_authorization_token" "token" { + provider = aws.virginia +} locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" @@ -128,10 +136,12 @@ resource "helm_release" "karpenter" { 
namespace = "karpenter" create_namespace = true - name = "karpenter" - repository = "oci://public.ecr.aws/karpenter" - chart = "karpenter" - version = "v0.19.3" + name = "karpenter" + repository = "oci://public.ecr.aws/karpenter" + repository_username = data.aws_ecrpublic_authorization_token.token.user_name + repository_password = data.aws_ecrpublic_authorization_token.token.password + chart = "karpenter" + version = "v0.19.3" set { name = "settings.aws.clusterName" diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf index 0717225ba1..e49d7bdbb5 100644 --- a/examples/outposts/prerequisites/main.tf +++ b/examples/outposts/prerequisites/main.tf @@ -5,9 +5,10 @@ provider "aws" { locals { name = "ex-${basename(path.cwd)}" - terraform_version = "1.3.3" + terraform_version = "1.3.6" - outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) + outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) + instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) tags = { Example = local.name @@ -31,6 +32,8 @@ module "ssm_bastion_ec2" { AdministratorAccess = "arn:aws:iam::aws:policy/AdministratorAccess" } + instance_type = local.instance_type + user_data = <<-EOT #!/bin/bash @@ -110,6 +113,10 @@ module "bastion_security_group" { data "aws_outposts_outposts" "this" {} +data "aws_outposts_outpost_instance_types" "this" { + arn = local.outpost_arn +} + # This just grabs the first Outpost and returns its subnets data "aws_subnets" "lookup" { filter { diff --git a/modules/karpenter/main.tf b/modules/karpenter/main.tf index d54fa9585a..62640f0550 100644 --- a/modules/karpenter/main.tf +++ b/modules/karpenter/main.tf @@ -261,11 +261,14 @@ locals { resource "aws_cloudwatch_event_rule" "this" { for_each = { for k, v in local.events : k => v if local.enable_spot_termination } - name = "Karpenter${each.value.name}-${var.cluster_name}" + name_prefix = 
"Karpenter${each.value.name}-" description = each.value.description event_pattern = jsonencode(each.value.event_pattern) - tags = var.tags + tags = merge( + { "ClusterName" : var.cluster_name }, + var.tags, + ) } resource "aws_cloudwatch_event_target" "this" {