diff --git a/.github/images/security_groups.svg b/.github/images/security_groups.svg index 6b120e98ba..6012962597 100644 --- a/.github/images/security_groups.svg +++ b/.github/images/security_groups.svg @@ -1 +1 @@ - + diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 74f3751c74..d5886a6d32 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.76.0 + rev: v1.77.0 hooks: - id: terraform_fmt - id: terraform_validate @@ -23,7 +23,7 @@ repos: - '--args=--only=terraform_standard_module_structure' - '--args=--only=terraform_workspace_remote' - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: check-merge-conflict - id: end-of-file-fixer diff --git a/README.md b/README.md index d003e14724..fb7dd4845f 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ Terraform module which creates AWS EKS (Kubernetes) resources - Upgrade Guides - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md) - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md) + - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md) ### External Documentation @@ -21,10 +22,17 @@ Please note that we strive to provide a comprehensive suite of documentation for - [AWS EKS Documentation](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) - [Kubernetes Documentation](https://kubernetes.io/docs/home/) +#### Reference Architecture + +The examples under `examples/` provide a comprehensive suite of configurations that demonstrate nearly all of the settings that can be used with this module. However, these examples are not representative of clusters that you would normally find in use for production workloads. 
For reference architectures that utilize this module, please see the following: + +- [EKS Reference Architecture](https://github.com/clowdhaus/eks-reference-architecture) + ## Available Features - AWS EKS Cluster Addons - AWS EKS Identity Provider Configuration +- [AWS EKS on Outposts support](https://aws.amazon.com/blogs/aws/deploy-your-amazon-eks-clusters-locally-on-aws-outposts/) - All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported: - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) @@ -43,13 +51,22 @@ An IAM role for service accounts (IRSA) sub-module has been created to make depl Some of the addon/controller policies that are currently supported include: +- [Cert-Manager](https://cert-manager.io/docs/configuration/acme/dns01/route53/#set-up-an-iam-role) - [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) -- [External DNS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) - [EBS CSI Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json) -- [VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html) -- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler#5-create-an-iam-role-for-the-pods) -- [Karpenter](https://karpenter.sh/preview/getting-started/getting-started-with-terraform/#create-the-karpentercontroller-iam-role) +- [EFS CSI Driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json) +- [External DNS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) +- [External Secrets](https://github.com/external-secrets/kubernetes-external-secrets#add-a-secret) +- [FSx for Lustre CSI Driver](https://github.com/kubernetes-sigs/aws-fsx-csi-driver/blob/master/docs/README.md) +- [Karpenter](https://github.com/aws/karpenter/blob/main/website/content/en/preview/getting-started/cloudformation.yaml) +- [Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/install/iam_policy.json) + - [Load Balancer Controller Target Group Binding Only](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/#iam-permission-subset-for-those-who-use-targetgroupbinding-only-and-dont-plan-to-use-the-aws-load-balancer-controller-to-manage-security-group-rules) +- [App Mesh Controller](https://github.com/aws/aws-app-mesh-controller-for-k8s/blob/master/config/iam/controller-iam-policy.json) + - [App Mesh Envoy Proxy](https://raw.githubusercontent.com/aws/aws-app-mesh-controller-for-k8s/master/config/iam/envoy-iam-policy.json) +- [Managed Service for Prometheus](https://docs.aws.amazon.com/prometheus/latest/userguide/set-up-irsa.html) +- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler#5-create-an-iam-role-for-the-pods) +- [Velero](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) +- [VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html) See [terraform-aws-iam/modules/iam-role-for-service-accounts](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) for the current list of supported addon/controller policies as 
more are added to the project. @@ -58,39 +75,36 @@ See [terraform-aws-iam/modules/iam-role-for-service-accounts](https://github.com ```hcl module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 18.0" + version = "~> 19.0" cluster_name = "my-cluster" cluster_version = "1.24" - cluster_endpoint_private_access = true cluster_endpoint_public_access = true cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } - cluster_encryption_config = [{ - provider_key_arn = "arn:aws:kms:eu-west-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" - resources = ["secrets"] - }] - - vpc_id = "vpc-1234556abcdef" - subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] + vpc_id = "vpc-1234556abcdef" + subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] + control_plane_subnet_ids = ["subnet-xyzde987", "subnet-slkjf456", "subnet-qeiru789"] # Self Managed Node Group(s) self_managed_node_group_defaults = { instance_type = "m6i.large" update_launch_template_default_version = true - iam_role_additional_policies = [ - "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" - ] + iam_role_additional_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } } self_managed_node_groups = { @@ -123,7 +137,6 @@ module "eks" { # EKS Managed Node Group(s) eks_managed_node_group_defaults = { - disk_size = 50 instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] } @@ -192,7 +205,8 @@ module "eks" { - [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations - [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups - [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) -- [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for managing compute resource scaling +- [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for intelligent data plane management +- [Outposts](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/outposts): EKS local cluster provisioned on [AWS Outposts](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html) - [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data @@ -208,8 +222,8 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [tls](#requirement\_tls) | >= 3.0 | @@ -217,7 +231,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | | [kubernetes](#provider\_kubernetes) | >= 2.10 | | [tls](#provider\_tls) | >= 3.0 | @@ -227,7 +241,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple |------|--------|---------| | [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a | | [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a | -| [kms](#module\_kms) | terraform-aws-modules/kms/aws | 1.0.2 | +| [kms](#module\_kms) | terraform-aws-modules/kms/aws | 1.1.0 | | [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a | ## Resources @@ -243,6 +257,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [aws_iam_policy.cluster_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_policy.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.cluster_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | @@ -252,7 +267,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | | [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_default_tags.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | +| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | @@ -273,15 +288,16 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no | | [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no | +| [cluster\_addons\_timeouts](#input\_cluster\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | `map(string)` | `{}` | no | | [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` |
["audit", "api", "authenticator"]
| no | -| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `list(any)` | `[]` | no | +| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | `any` |
{ "resources": ["secrets"] }
| no | | [cluster\_encryption\_policy\_description](#input\_cluster\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no | | [cluster\_encryption\_policy\_name](#input\_cluster\_encryption\_policy\_name) | Name to use on cluster encryption policy created | `string` | `null` | no | | [cluster\_encryption\_policy\_path](#input\_cluster\_encryption\_policy\_path) | Cluster encryption policy path | `string` | `null` | no | | [cluster\_encryption\_policy\_tags](#input\_cluster\_encryption\_policy\_tags) | A map of additional tags to add to the cluster encryption policy created | `map(string)` | `{}` | no | | [cluster\_encryption\_policy\_use\_name\_prefix](#input\_cluster\_encryption\_policy\_use\_name\_prefix) | Determines whether cluster encryption policy name (`cluster_encryption_policy_name`) is used as a prefix | `bool` | `true` | no | -| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `false` | no | -| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `true` | no | +| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `true` | no | +| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `false` | no | | [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` |
["0.0.0.0/0"]
| no | | [cluster\_iam\_role\_dns\_suffix](#input\_cluster\_iam\_role\_dns\_suffix) | Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China) | `string` | `null` | no | | [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no | @@ -289,11 +305,12 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no | | [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no | | [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false` | `string` | `""` | no | +| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Existing security group ID to be attached to the cluster | `string` | `""` | no | | [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no | | [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no | | [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `bool` | `true` | no | | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | +| [cluster\_service\_ipv6\_cidr](#input\_cluster\_service\_ipv6\_cidr) | The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster | `string` | `null` | no | | [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no | | [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.24`) | `string` | `null` | no | @@ -302,10 +319,10 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). 
Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | | [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no | | [create\_cluster\_primary\_security\_group\_tags](#input\_create\_cluster\_primary\_security\_group\_tags) | Indicates whether or not to tag the cluster's primary security group. This security group is created by the EKS service, not the module, and therefore tagging is handled after cluster creation | `bool` | `true` | no | -| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no | +| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default | `bool` | `true` | no | | [create\_cni\_ipv6\_iam\_policy](#input\_create\_cni\_ipv6\_iam\_policy) | Determines whether to create an [`AmazonEKS_CNI_IPv6_Policy`](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy) | `bool` | `false` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `false` | no | +| [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `true` | no | | [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no | | [custom\_oidc\_thumbprints](#input\_custom\_oidc\_thumbprints) | Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s) | `list(string)` | `[]` | no | | [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no | @@ -314,7 +331,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled. Defaults to `true` | `bool` | `true` | no | | [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. 
Required if `create_iam_role` is set to `false` | `string` | `null` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | | [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | @@ -335,20 +352,20 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no | | [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no | | [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no | +| [node\_security\_group\_enable\_recommended\_rules](#input\_node\_security\_group\_enable\_recommended\_rules) | Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic | `bool` | `true` | no | | [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no | | [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no | -| [node\_security\_group\_ntp\_ipv4\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv4\_cidr\_block) | IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["169.254.169.123/32"]` | `list(string)` |
["0.0.0.0/0"]
| no | -| [node\_security\_group\_ntp\_ipv6\_cidr\_block](#input\_node\_security\_group\_ntp\_ipv6\_cidr\_block) | IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `["fd00:ec2::123/128"]` | `list(string)` |
["::/0"]
| no | | [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no | | [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `bool` | `true` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | +| [outpost\_config](#input\_outpost\_config) | Configuration for the AWS Outpost to provision the cluster on | `any` | `{}` | no | | [prefix\_separator](#input\_prefix\_separator) | The separator to use between the prefix and the generated timestamp for resource names | `string` | `"-"` | no | | [putin\_khuylo](#input\_putin\_khuylo) | Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo! | `bool` | `true` | no | | [self\_managed\_node\_group\_defaults](#input\_self\_managed\_node\_group\_defaults) | Map of self-managed node group default configurations | `any` | `{}` | no | | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no | | [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs where the nodes/node groups will be provisioned. If `control_plane_subnet_ids` is not provided, the EKS cluster control plane (ENIs) will be provisioned in these subnets | `list(string)` | `[]` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and its nodes will be provisioned | `string` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster security group will be provisioned | `string` | `null` | no | ## Outputs @@ -364,9 +381,9 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. 
Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md new file mode 100644 index 0000000000..429a6a8cbd --- /dev/null +++ b/docs/UPGRADE-19.0.md @@ -0,0 +1,466 @@ +# Upgrade from v18.x to v19.x + +Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce. + +## List of backwards incompatible changes + +- The `cluster_id` output used to output the name of the cluster. This is because the cluster name is a unique constraint and is therefore set as the unique identifier within Terraform's state map. However, starting with local EKS clusters created on Outposts, there is now an attribute returned from the `aws eks create-cluster` API named `id`. The `cluster_id` output has been updated to return this value, which means that for current, standard EKS clusters created in the AWS cloud, no value will be returned (at the time of this writing) for `cluster_id`, and only local EKS clusters on Outposts will return a value that looks like a UUID/GUID. Users should switch all instances of `cluster_id` to use `cluster_name` before upgrading to v19. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560) +- Minimum supported version of Terraform AWS provider updated to v4.45 to support the latest features provided via the resources utilized. +- Minimum supported version of Terraform updated to v1.0 +- The individual security group created per EKS managed node group or self managed node group has been removed. This configuration went mostly unused and would often cause confusion ("Why is there an empty security group attached to my nodes?"). This functionality can easily be replicated by users providing one or more externally created security groups to attach to nodes launched from the node group. +- Previously, `var.iam_role_additional_policies` (one for each of the following: cluster IAM role, EKS managed node group IAM role, self-managed node group IAM role, and Fargate Profile IAM role) accepted a list of strings. This worked well for policies that already existed but failed for policies being created at the same time as the cluster due to the well-known issue of unknown values used in a `for_each` loop. To rectify this issue in `v19.x`, two changes were made: 1. `var.iam_role_additional_policies` was changed from type `list(string)` to type `map(string)` -> this is a breaking change. More information on managing this change can be found below, under `Terraform State Moves` 2. The logic used in the root module for this variable was changed to replace the use of `try()` with `lookup()`. More details on why can be found [here](https://github.com/clowdhaus/terraform-for-each-unknown) +- The cluster name has been removed from the Karpenter module event rule names. Because appending long cluster names to the provided naming scheme could exceed name length constraints, the cluster name has moved to a `ClusterName` tag and the event rule name is now a prefix. This guarantees that users can have multiple instances of Karpenter with their respective event rules/SQS queues without name collisions, while also still being able to identify which queues and event rules belong to which cluster. + +## Additional changes + +### Added + +- Support for setting `preserve` as well as `most_recent` on addons. 
+ - `preserve` indicates if you want to preserve the created resources when deleting the EKS add-on + - `most_recent` indicates if you want to use the most recent revision of the add-on or the default version (default) +- Support for setting default node security group rules for commonly required access patterns: + - Egress all for `0.0.0.0/0`/`::/0` + - Ingress from cluster security group for 8443/TCP and 9443/TCP for common applications such as ALB Ingress Controller, Karpenter, OPA Gatekeeper, etc. These are commonly used as webhook ports for validating and mutating webhooks + +### Modified + +- `cluster_security_group_additional_rules` and `node_security_group_additional_rules` have been modified to use `lookup()` instead of `try()` to avoid the well-known issue of [unknown values within a `for_each` loop](https://github.com/hashicorp/terraform/issues/4149) +- The default cluster security group rules no longer include egress rules for TCP/443 and TCP/10250 to node groups, since the cluster primary security group includes a default rule for ALL to `0.0.0.0/0`/`::/0` +- The default node security group egress rules have been removed, since the default security group settings include an egress rule for ALL to `0.0.0.0/0`/`::/0` +- `block_device_mappings` previously required a map of maps but has since changed to an array of maps. Users can remove the outer key for each block device mapping and replace the outermost map `{}` with an array `[]`. There are no state changes required for this change. +- `create_kms_key` previously defaulted to `false` and now defaults to `true`. Clusters created with this module now enable secret encryption by default with a customer managed KMS key created by this module +- `cluster_encryption_config` previously used a type of `list(any)` and now uses a type of `any` -> users can simply remove the outer `[`...`]` brackets on `v19.x` + - `cluster_encryption_config` previously defaulted to `[]` and now defaults to `{resources = ["secrets"]}` to encrypt secrets by default +- `cluster_endpoint_public_access` previously defaulted to `true` and now defaults to `false`. Clusters created with this module now default to private-only access to the cluster endpoint + - `cluster_endpoint_private_access` previously defaulted to `false` and now defaults to `true` +- The addon configuration now sets `"OVERWRITE"` as the default value for `resolve_conflicts` to ease addon upgrade management. Users can opt out of this by instead setting `"NONE"` as the value for `resolve_conflicts` +- The `kms` module used has been updated from `v1.0.2` to `v1.1.0` - no material changes other than updating to the latest version +- The default value for EKS managed node group `update_config` has been updated to the recommended `{ max_unavailable_percentage = 33 }` +- The default value for the self-managed node group `instance_refresh` has been updated to the recommended: + ```hcl + { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } + ``` + +### Removed + +- Removed all references to `aws_default_tags` to avoid update conflicts; this is the responsibility of the provider and should be handled at the provider level + - https://github.com/terraform-aws-modules/terraform-aws-eks/issues?q=is%3Aissue+default_tags+is%3Aclosed + - https://github.com/terraform-aws-modules/terraform-aws-eks/pulls?q=is%3Apr+default_tags+is%3Aclosed + +### Variable and output changes + +1. 
Removed variables: + + - `node_security_group_ntp_ipv4_cidr_block` - default security group settings have egress rule for ALL to `0.0.0.0/0`/`::/0` + - `node_security_group_ntp_ipv6_cidr_block` - default security group settings have egress rule for ALL to `0.0.0.0/0`/`::/0` + + - Self managed node groups: + - `create_security_group` + - `security_group_name` + - `security_group_use_name_prefix` + - `security_group_description` + - `security_group_rules` + - `security_group_tags` + - `cluster_security_group_id` + - `vpc_id` + - EKS managed node groups: + - `create_security_group` + - `security_group_name` + - `security_group_use_name_prefix` + - `security_group_description` + - `security_group_rules` + - `security_group_tags` + - `cluster_security_group_id` + - `vpc_id` + +2. Renamed variables: + + - N/A + +3. Added variables: + + - `provision_on_outpost` for Outposts support + - `outpost_config` for Outposts support (a minimal configuration sketch is shown below, under Upgrade Migrations) + - `cluster_addons_timeouts` for setting a common set of timeouts for all addons (unless a specific value is provided within the addon configuration) + - `service_ipv6_cidr` for setting the IPv6 CIDR block for the Kubernetes service addresses + - `node_security_group_enable_recommended_rules` for enabling recommended node security group rules for common access patterns + + - Self managed node groups: + - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204) + - `maintenance_options` + - `private_dns_name_options` + - `instance_requirements` + - `context` + - `default_instance_warmup` + - `force_delete_warm_pool` + - EKS managed node groups: + - `use_custom_launch_template` was added to better clarify how users can switch between a custom launch template and the default launch template provided by the EKS managed node group. Previously, to achieve this same functionality of using the default launch template, users needed to set `create_launch_template = false` and `launch_template_name = ""`, which was not very intuitive. + - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204) + - `maintenance_options` + - `private_dns_name_options` + +4. Removed outputs: + + - Self managed node groups: + - `security_group_arn` + - `security_group_id` + - EKS managed node groups: + - `security_group_arn` + - `security_group_id` + +5. Renamed outputs: + + - `cluster_id` is not renamed but the value it returns is now different. For standard EKS clusters created in the AWS cloud, the value returned at the time of this writing is `null`/empty. For local EKS clusters created on Outposts, the value returned will look like a UUID/GUID. Users should switch all instances of `cluster_id` to use `cluster_name` before upgrading to v19. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560) + +6. Added outputs: + + - `cluster_name` - The `cluster_id` currently set by the AWS provider is actually the cluster name, but in the future this will change and there will be a distinction between the `cluster_name` and `cluster_id`. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560) + +## Upgrade Migrations + +1. Before upgrading your module definition to `v19.x`, please see the sections below on removing the node group security group for both EKS managed node groups and self-managed node groups prior to upgrading. 
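Before proceeding to the node group sections, here is the configuration sketch for the new `outpost_config` input referenced above. This is a minimal sketch only: the Outpost ARN, instance type, VPC, and subnet IDs are illustrative placeholders, and the attribute names mirror the `outpost_config` block of the underlying `aws_eks_cluster` resource:

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 19.0"

  cluster_name    = "ex-outposts"
  cluster_version = "1.24"

  # Local EKS clusters on Outposts only support private access to the API endpoint
  cluster_endpoint_public_access = false

  vpc_id     = "vpc-1234556abcdef"
  subnet_ids = ["subnet-abcde012", "subnet-bcde012a"]

  outpost_config = {
    # The instance type must be available on the Outpost's provisioned capacity
    control_plane_instance_type = "m5d.large"
    outpost_arns                = ["arn:aws:outposts:eu-west-1:111122223333:outpost/op-0123456789abcdef0"]
  }
}
```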
+### Self Managed Node Groups + +Self managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the shared node security group). While still using version `v18.x` of your module definition, remove this security group from your node groups by setting `create_security_group = false`. + +- If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. Once this is in place, you can proceed with the original security group removal. +- For most users, the security group is not used and can be safely removed. However, instances that are already deployed will have the security group attached, and it must be disassociated before the security group can be deleted. Because instances are deployed via autoscaling groups, we cannot simply remove the security group from code and have those changes reflected on the instances. Instead, we have to update the code and then trigger the autoscaling groups to cycle the instances deployed so that new instances are provisioned without the security group attached. You can utilize the `instance_refresh` parameter of Autoscaling groups to force nodes to re-deploy when removing the security group, since changes to launch templates automatically trigger an instance refresh. An example configuration is provided below. + - Add the following to either `self_managed_node_group_defaults` or the individual self-managed node group definitions: + ```hcl + create_security_group = false + instance_refresh = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } + ``` +- It is recommended to use the `aws-node-termination-handler` while performing this update. Please refer to the [`irsa-autoscale-refresh` example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/20af82846b4a1f23f3787a8c455f39c0b6164d80/examples/irsa_autoscale_refresh/charts.tf#L86) for usage. This will ensure that pods are safely evicted in a controlled manner to avoid service disruptions. +- Once the necessary configurations are in place, you can apply the changes, which will: + 1. Create a new launch template (version) without the self-managed node group security group + 2. Replace instances based on the `instance_refresh` configuration settings + 3. New instances will launch without the self-managed node group security group, and prior instances will be terminated + 4. Once the self-managed node group has cycled, the security group will be deleted + +### EKS Managed Node Groups + +EKS managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the shared node security group). While still using version `v18.x` of your module definition, remove this security group from your node groups by setting `create_security_group = false`. + +- If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. Once this is in place, you can proceed with the original security group removal. 
+- EKS managed node groups roll out changes using a [rolling update strategy](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-update-behavior.html) that can be influenced through `update_config`. No additional changes are required for removing the security group created by node groups (unlike self-managed node groups, which should utilize the `instance_refresh` setting of Autoscaling groups). +- Once `create_security_group = false` has been set, you can apply the changes, which will: + 1. Create a new launch template (version) without the EKS managed node group security group + 2. Replace instances based on the `update_config` configuration settings + 3. New instances will launch without the EKS managed node group security group, and prior instances will be terminated + 4. Once the EKS managed node group has cycled, the security group will be deleted + +2. Once the node group security group(s) have been removed, you can update your module definition to specify the `v19.x` version of the module +3. Run `terraform init -upgrade=true` to update your configuration and pull in the v19 changes +4. Using the documentation provided above, update your module definition to reflect the changes in the module from `v18.x` to `v19.x`. You can utilize `terraform plan` as you go to help highlight any changes that you wish to make. See below for `terraform state mv ...` commands related to the use of `iam_role_additional_policies`. If you are not providing any values to these variables, you can skip this section. +5. Once you are satisfied with the changes and the `terraform plan` output, you can apply the changes to sync your infrastructure with the updated module definition (or vice versa). + +### Diff of Before (v18.x) vs After (v19.x) + +```diff + module "eks" { + source = "terraform-aws-modules/eks/aws" +- version = "~> 18.0" ++ version = "~> 19.0" + + cluster_name = local.name ++ cluster_endpoint_public_access = true +- cluster_endpoint_private_access = true # now the default + + cluster_addons = { + coredns = { +- resolve_conflicts = "OVERWRITE" # now the default ++ preserve = true ++ most_recent = true + ++ timeouts = { ++ create = "25m" ++ delete = "10m" ++ } + } + kube-proxy = {} + vpc-cni = { +- resolve_conflicts = "OVERWRITE" # now the default + } + } + + # Encryption key + create_kms_key = true +- cluster_encryption_config = [{ +- resources = ["secrets"] +- }] ++ cluster_encryption_config = { ++ resources = ["secrets"] ++ } + kms_key_deletion_window_in_days = 7 + enable_kms_key_rotation = true + +- iam_role_additional_policies = [aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets + + # Extend node-to-node security group rules +- node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"] # now the default + node_security_group_additional_rules = { +- ingress_self_ephemeral = { +- description = "Node to node ephemeral ports" +- protocol = "tcp" +- from_port = 0 +- to_port = 0 +- type = "ingress" +- self = true +- } +- egress_all = { +- description = "Node all egress" +- protocol = "-1" +- from_port = 0 +- to_port = 0 +- type = "egress" +- cidr_blocks = ["0.0.0.0/0"] +- ipv6_cidr_blocks = ["::/0"] +- } + } + + # Self Managed Node Group(s) + self_managed_node_group_defaults = { + vpc_security_group_ids = [aws_security_group.additional.id] +- iam_role_additional_policies = [aws_iam_policy.additional.arn] 
++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + } + + self_managed_node_groups = { + spot = { + instance_type = "m5.large" + instance_market_options = { + market_type = "spot" + } + + pre_bootstrap_user_data = <<-EOT + echo "foo" + export FOO=bar + EOT + + bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" + + post_bootstrap_user_data = <<-EOT + cd /tmp + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + sudo systemctl enable amazon-ssm-agent + sudo systemctl start amazon-ssm-agent + EOT + +- create_security_group = true +- security_group_name = "eks-managed-node-group-complete-example" +- security_group_use_name_prefix = false +- security_group_description = "EKS managed node group complete example security group" +- security_group_rules = {} +- security_group_tags = {} + } + } + + # EKS Managed Node Group(s) + eks_managed_node_group_defaults = { + ami_type = "AL2_x86_64" + instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] + + attach_cluster_primary_security_group = true + vpc_security_group_ids = [aws_security_group.additional.id] +- iam_role_additional_policies = [aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + } + + eks_managed_node_groups = { + blue = {} + green = { + min_size = 1 + max_size = 10 + desired_size = 1 + + instance_types = ["t3.large"] + capacity_type = "SPOT" + labels = { + Environment = "test" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + + taints = { + dedicated = { + key = "dedicated" + value = "gpuGroup" + effect = "NO_SCHEDULE" + } + } + + update_config = { + max_unavailable_percentage = 33 # or set `max_unavailable` + } + +- create_security_group = true +- security_group_name = "eks-managed-node-group-complete-example" +- security_group_use_name_prefix = false +- security_group_description = "EKS managed node group complete example security group" +- security_group_rules = {} +- security_group_tags = {} + + tags = { + ExtraTag = "example" + } + } + } + + # Fargate Profile(s) + fargate_profile_defaults = { +- iam_role_additional_policies = [aws_iam_policy.additional.arn] ++ iam_role_additional_policies = { ++ additional = aws_iam_policy.additional.arn ++ } + } + + fargate_profiles = { + default = { + name = "default" + selectors = [ + { + namespace = "kube-system" + labels = { + k8s-app = "kube-dns" + } + }, + { + namespace = "default" + } + ] + + tags = { + Owner = "test" + } + + timeouts = { + create = "20m" + delete = "20m" + } + } + } + + # OIDC Identity provider + cluster_identity_providers = { + sts = { + client_id = "sts.amazonaws.com" + } + } + + # aws-auth configmap + manage_aws_auth_configmap = true + + aws_auth_node_iam_role_arns_non_windows = [ + module.eks_managed_node_group.iam_role_arn, + module.self_managed_node_group.iam_role_arn, + ] + aws_auth_fargate_profile_pod_execution_role_arns = [ + module.fargate_profile.fargate_profile_pod_execution_role_arn + ] + + aws_auth_roles = [ + { + rolearn = "arn:aws:iam::66666666666:role/role1" + username = "role1" + groups = ["system:masters"] + }, + ] + + aws_auth_users = [ + { + userarn = "arn:aws:iam::66666666666:user/user1" + username = "user1" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::66666666666:user/user2" + username = "user2" + groups = ["system:masters"] + }, + ] + + aws_auth_accounts = [ + "777777777777", 
+ "888888888888", + ] + + tags = local.tags +} +``` + +## Terraform State Moves + +The following Terraform state move commands are optional but recommended if you are providing additional IAM policies that are to be attached to IAM roles created by this module (cluster IAM role, node group IAM role, Fargate profile IAM role). Because the resources affected are `aws_iam_role_policy_attachment`, in theory you could get away with simply applying the configuration and letting Terraform detach and re-attach the policies. However, during this brief period of update, you could experience permission failures as the policy is detached and re-attached, and therefore the state move route is recommended. + +Where `""` is specified, this should be replaced with the full ARN of the policy, and `""` should be replaced with the key used in the `iam_role_additional_policies` map for the associated policy. For example, if you have the following `v19.x` configuration: + +```hcl + ... + # This is demonstrating the cluster IAM role additional policies + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } + ... +``` + +The associated state move command would look similar to the following (albeit with your correct policy ARN): + +```sh +terraform state mv 'module.eks.aws_iam_role_policy_attachment.this["arn:aws:iam::111111111111:policy/ex-complete-additional"]' 'module.eks.aws_iam_role_policy_attachment.additional["additional"]' +``` + +If you are not providing any additional IAM policies, no actions are required. + +### Cluster IAM Role + +Repeat for each policy provided in `iam_role_additional_policies`: + +```sh +terraform state mv 'module.eks.aws_iam_role_policy_attachment.this[""]' 'module.eks.aws_iam_role_policy_attachment.additional[""]' +``` + +### EKS Managed Node Group IAM Role + +Where `""` is the key used in the `eks_managed_node_groups` map for the associated node group. Repeat for each policy provided in `iam_role_additional_policies` in either `eks_managed_node_group_defaults` or the individual node group definitions: + +```sh +terraform state mv 'module.eks.module.eks_managed_node_group[""].aws_iam_role_policy_attachment.this[""]' 'module.eks.module.eks_managed_node_group[""].aws_iam_role_policy_attachment.additional[""]' +``` + +### Self-Managed Node Group IAM Role + +Where `""` is the key used in the `self_managed_node_groups` map for the associated node group. Repeat for each policy provided in `iam_role_additional_policies` in either `self_managed_node_group_defaults` or the individual node group definitions: + +```sh +terraform state mv 'module.eks.module.self_managed_node_group[""].aws_iam_role_policy_attachment.this[""]' 'module.eks.module.self_managed_node_group[""].aws_iam_role_policy_attachment.additional[""]' +``` + +### Fargate Profile IAM Role + +Where `""` is the key used in the `fargate_profiles` map for the associated profile. 
Repeat for each policy provided in `iam_role_additional_policies` in either `fargate_profile_defaults` or the individual profile definitions: + +```sh +terraform state mv 'module.eks.module.fargate_profile[""].aws_iam_role_policy_attachment.this[""]' 'module.eks.module.fargate_profile[""].aws_iam_role_policy_attachment.additional[""]' +``` diff --git a/docs/compute_resources.md b/docs/compute_resources.md index d90cf5f0a8..05d5ea1dd8 100644 --- a/docs/compute_resources.md +++ b/docs/compute_resources.md @@ -18,8 +18,7 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ ```hcl eks_managed_node_groups = { default = { - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false } } ``` @@ -29,8 +28,7 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ ```hcl eks_managed_node_groups = { bottlerocket_default = { - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" @@ -45,15 +43,15 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ prepend_userdata = { # See issue https://github.com/awslabs/amazon-eks-ami/issues/844 pre_bootstrap_user_data = <<-EOT - #!/bin/bash - set -ex - cat <<-EOF > /etc/profile.d/bootstrap.sh - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false - export KUBELET_EXTRA_ARGS="--max-pods=110" - EOF - # Source extra environment variables in bootstrap script - sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh + #!/bin/bash + set -ex + cat <<-EOF > /etc/profile.d/bootstrap.sh + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false + export KUBELET_EXTRA_ARGS="--max-pods=110" + EOF + # Source extra environment variables in bootstrap script + sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh EOT } } @@ -68,9 +66,9 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ platform = "bottlerocket" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } } @@ -116,17 +114,17 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ enable_bootstrap_user_data = true # this will get added to the template bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" - [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + [settings.kubernetes.node-taints] + "dedicated" = "experimental:PreferNoSchedule" + "special" = "true:NoSchedule" EOT } } diff --git a/docs/faq.md b/docs/faq.md index c1ab564ee0..c53b027dca 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -1,7 +1,6 @@ # Frequently Asked Questions - [I received an error: `expect exactly one securityGroup tagged with kubernetes.io/cluster/ ...`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#i-received-an-error-expect-exactly-one-securitygroup-tagged-with-kubernetesioclustername-) -- [I received an error: `Error: Invalid for_each argument 
...`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#i-received-an-error-error-invalid-for_each-argument-) - [Why are nodes not being registered?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-nodes-not-being-registered) - [Why are there no changes when a node group's `desired_size` is modified?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-there-no-changes-when-a-node-groups-desired_size-is-modified) - [How can I deploy Windows based nodes?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#how-can-i-deploy-windows-based-nodes) @@ -48,41 +47,6 @@ By default, EKS creates a cluster primary security group that is created outside In theory, if you are attaching the cluster primary security group, you shouldn't need to use the shared node security group created by the module. However, this is left up to users to decide for their requirements and use case. -### I received an error: `Error: Invalid for_each argument ...` - -Users may encounter an error such as `Error: Invalid for_each argument - The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply ...` - -This error is due to an upstream issue with [Terraform core](https://github.com/hashicorp/terraform/issues/4149). There are two potential options you can take to help mitigate this issue: - -1. Create the dependent resources before the cluster => `terraform apply -target ` and then `terraform apply` for the cluster (or other similar means to just ensure the referenced resources exist before creating the cluster) - -- Note: this is the route users will have to take for adding additional security groups to nodes since there isn't a separate "security group attachment" resource - -2. For additional IAM policies, users can attach the policies outside of the cluster definition as demonstrated below - -```hcl -resource "aws_iam_role_policy_attachment" "additional" { - for_each = module.eks.eks_managed_node_groups - # you could also do the following or any combination: - # for_each = merge( - # module.eks.eks_managed_node_groups, - # module.eks.self_managed_node_group, - # module.eks.fargate_profile, - # ) - - # This policy does not have to exist at the time of cluster creation. Terraform can - # deduce the proper order of its creation to avoid errors during creation - policy_arn = aws_iam_policy.node_additional.arn - role = each.value.iam_role_name -} -``` - -TL;DR - Terraform resource passed into the modules map definition _must_ be known before you can apply the EKS module. The variables this potentially affects are: - -- `cluster_security_group_additional_rules` (i.e. - referencing an external security group resource in a rule) -- `node_security_group_additional_rules` (i.e. - referencing an external security group resource in a rule) -- `iam_role_additional_policies` (i.e. - referencing an external policy resource) - ### Why are nodes not being registered? Nodes not being able to register with the EKS control plane is generally due to networking mis-configurations. 
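The `for_each` limitation described in the FAQ entry removed above no longer applies to `iam_role_additional_policies` on `v19.x`: the input is now a map whose keys are static strings, so a policy created in the same configuration can be attached directly. A minimal sketch (the policy name, statement, and node group key are illustrative):

```hcl
resource "aws_iam_policy" "node_additional" {
  name = "ex-node-additional"

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect   = "Allow"
      Action   = ["ec2:Describe*"]
      Resource = "*"
    }]
  })
}

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 19.0"

  cluster_name    = "my-cluster"
  cluster_version = "1.24"

  vpc_id     = "vpc-1234556abcdef"
  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]

  eks_managed_node_group_defaults = {
    # Map keys are known at plan time, so this no longer triggers the
    # "Invalid for_each argument" error when the policy is created in
    # the same apply
    iam_role_additional_policies = {
      additional = aws_iam_policy.node_additional.arn
    }
  }

  eks_managed_node_groups = {
    default = {}
  }
}
```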
diff --git a/docs/network_connectivity.md b/docs/network_connectivity.md index 67805aa77c..9d38fc130d 100644 --- a/docs/network_connectivity.md +++ b/docs/network_connectivity.md @@ -20,8 +20,7 @@ Please refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/u - Lastly, users are able to opt in to attaching the primary security group automatically created by the EKS service by setting `attach_cluster_primary_security_group` = `true` from the root module for the respective node group (or set it within the node group defaults). This security group is not managed by the module; it is created by the EKS service. It permits all traffic within the domain of the security group as well as all egress traffic to the internet. - Node Group Security Group(s) - - Each node group (EKS Managed Node Group and Self Managed Node Group) by default creates its own security group. By default, this security group does not contain any additional security group rules. It is merely an "empty container" that offers users the ability to opt into any addition inbound our outbound rules as necessary - - Users also have the option to supply their own, and/or additional, externally created security group(s) to the node group as well via the `vpc_security_group_ids` variable + - Users have the option to assign their own externally created security group(s) to the node group via the `vpc_security_group_ids` variable See the example snippet below which adds additional security group rules to the cluster security group as well as the shared node security group (for node-to-node access). Users can use this extensibility to open up network access as they see fit using the security groups provided by the module: diff --git a/docs/user_data.md b/docs/user_data.md index e5c247b798..4ce9a13ac4 100644 --- a/docs/user_data.md +++ b/docs/user_data.md @@ -55,15 +55,15 @@ Since the EKS Managed Node Group service provides the necessary bootstrap user d ```hcl # See issue https://github.com/awslabs/amazon-eks-ami/issues/844 pre_bootstrap_user_data = <<-EOT - #!/bin/bash - set -ex - cat <<-EOF > /etc/profile.d/bootstrap.sh - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false - export KUBELET_EXTRA_ARGS="--max-pods=110" - EOF - # Source extra environment variables in bootstrap script - sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh + #!/bin/bash + set -ex + cat <<-EOF > /etc/profile.d/bootstrap.sh + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false + export KUBELET_EXTRA_ARGS="--max-pods=110" + EOF + # Source extra environment variables in bootstrap script + sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh EOT ``` diff --git a/examples/complete/README.md b/examples/complete/README.md index 66f3e8fdf1..6f5edbdb41 100644 --- a/examples/complete/README.md +++ b/examples/complete/README.md @@ -33,15 +33,15 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules @@ -61,7 +61,9 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| +| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | ## Inputs @@ -81,9 +83,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | diff --git a/examples/complete/main.tf b/examples/complete/main.tf index 04817fa78b..a040d750f2 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -14,10 +14,15 @@ provider "kubernetes" { } } +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -32,46 +37,56 @@ locals { module "eks" { source = "../.." 
- cluster_name = local.name - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_endpoint_public_access = true cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + preserve = true + most_recent = true + + timeouts = { + create = "25m" + delete = "10m" + } + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } # Encryption key create_kms_key = true - cluster_encryption_config = [{ + cluster_encryption_config = { resources = ["secrets"] - }] + } kms_key_deletion_window_in_days = 7 enable_kms_key_rotation = true + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } + vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets # Extend cluster security group rules cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" + ingress_nodes_ephemeral_ports_tcp = { + description = "Nodes on ephemeral ports" protocol = "tcp" from_port = 1025 to_port = 65535 - type = "egress" + type = "ingress" source_node_security_group = true } } # Extend node-to-node security group rules - node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"] node_security_group_additional_rules = { ingress_self_all = { description = "Node to node all ports/protocols" @@ -81,21 +96,21 @@ module "eks" { type = "ingress" self = true } - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } } # Self Managed Node Group(s) self_managed_node_group_defaults = { - vpc_security_group_ids = [aws_security_group.additional.id] - iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"] + vpc_security_group_ids = [aws_security_group.additional.id] + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } + + instance_refresh = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } } self_managed_node_groups = { @@ -106,17 +121,17 @@ module "eks" { } pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - cd /tmp - sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm - sudo systemctl enable amazon-ssm-agent - sudo systemctl start amazon-ssm-agent + cd /tmp + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + sudo systemctl enable amazon-ssm-agent + sudo systemctl start amazon-ssm-agent EOT } } @@ -128,6 +143,9 @@ module "eks" { attach_cluster_primary_security_group = true vpc_security_group_ids = [aws_security_group.additional.id] + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } } eks_managed_node_groups = { @@ -154,7 +172,7 @@ module "eks" { } update_config = { - max_unavailable_percentage = 50 # or set `max_unavailable` + max_unavailable_percentage = 33 # or set `max_unavailable` } tags = { @@ -270,7 +288,6 @@ module "eks_managed_node_group" { cluster_name = module.eks.cluster_name cluster_version = module.eks.cluster_version - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets 
cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id vpc_security_group_ids = [ @@ -305,7 +322,6 @@ module "self_managed_node_group" { instance_type = "m5.large" - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets vpc_security_group_ids = [ module.eks.cluster_primary_security_group_id, @@ -366,12 +382,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - intra_subnets = ["10.0.7.0/28", "10.0.7.16/28", "10.0.7.32/28"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true @@ -382,13 +398,11 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags @@ -409,5 +423,22 @@ resource "aws_security_group" "additional" { ] } - tags = local.tags + tags = merge(local.tags, { Name = "${local.name}-additional" }) +} + +resource "aws_iam_policy" "additional" { + name = "${local.name}-additional" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) } diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf index f94c629541..c1020f3333 100644 --- a/examples/complete/outputs.tf +++ b/examples/complete/outputs.tf @@ -17,16 +17,16 @@ output "cluster_endpoint" { value = module.eks.cluster_endpoint } -output "cluster_name" { - description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_name -} - output "cluster_id" { - description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 6d6dc45be6..f2f8625d4b 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 449777a52a..42fb41e82c 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -57,45 +57,37 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | -| [tls](#provider\_tls) | >= 3.0 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules | Name | Source | Version | |------|--------|---------| +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.1 | | [eks](#module\_eks) | ../.. 
| n/a | +| [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | -| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.12 | +| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 | ## Resources | Name | Type | |------|------| -| [aws_autoscaling_group_tag.cluster_autoscaler_label_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group_tag) | resource | | [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | -| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | -| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | -| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs @@ -115,9 +107,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. 
Will block on cluster creation until the cluster is really ready | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 050a508810..ebf39cdd16 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -14,11 +14,17 @@ provider "kubernetes" { } } +data "aws_caller_identity" "current" {} +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" cluster_version = "1.24" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -26,8 +32,6 @@ locals { } } -data "aws_caller_identity" "current" {} - ################################################################################ # EKS Module ################################################################################ @@ -35,10 +39,9 @@ data "aws_caller_identity" "current" {} module "eks" { source = "../.." - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true # IPV6 cluster_ip_family = "ipv6" @@ -53,66 +56,23 @@ module "eks" { cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true service_account_role_arn = module.vpc_cni_irsa.iam_role_arn } } - cluster_encryption_config = [{ - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] - }] - - cluster_tags = { - # This should not affect the name of the cluster primary security group - # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 - # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 - Name = local.name - } - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets manage_aws_auth_configmap = true - # Extend cluster security group rules - cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - - # Extend node-to-node security group rules - node_security_group_ntp_ipv6_cidr_block = ["fd00:ec2::123/128"] - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - description = "Node all egress" - protocol = "-1" - 
from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - } - eks_managed_node_group_defaults = { ami_type = "AL2_x86_64" instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] @@ -130,14 +90,13 @@ module "eks" { default_node_group = { # By default, the module creates a launch template to ensure tags are propagated to instances, etc., # so we need to disable it to use the default template provided by the AWS EKS managed node group service - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false disk_size = 50 # Remote access cannot be specified with a launch template remote_access = { - ec2_ssh_key = aws_key_pair.this.key_name + ec2_ssh_key = module.key_pair.key_pair_name source_security_group_ids = [aws_security_group.remote_access.id] } } @@ -146,8 +105,7 @@ bottlerocket_default = { # By default, the module creates a launch template to ensure tags are propagated to instances, etc., # so we need to disable it to use the default template provided by the AWS EKS managed node group service - create_launch_template = false - launch_template_name = "" + use_custom_launch_template = false ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" @@ -158,11 +116,11 @@ ami_type = "BOTTLEROCKET_x86_64" platform = "bottlerocket" - # this will get added to what AWS provides + # This will get added to what AWS provides bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -172,31 +130,35 @@ ami_id = data.aws_ami.eks_default_bottlerocket.image_id platform = "bottlerocket" - # use module user data template to boostrap + # Use module user data template to bootstrap enable_bootstrap_user_data = true - # this will get added to the template + # This will get added to the template bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" - - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" - - [settings.kubernetes.node-taints] - "dedicated" = "experimental:PreferNoSchedule" - "special" = "true:NoSchedule" + # The admin host container provides SSH access and runs with "superpowers". + # It is disabled by default, but can be enabled explicitly.
+ [settings.host-containers.control] + enabled = true + + # extra args added + [settings.kernel] + lockdown = "integrity" + + [settings.kubernetes.node-labels] + label1 = "foo" + label2 = "bar" + + [settings.kubernetes.node-taints] + dedicated = "experimental:PreferNoSchedule" + special = "true:NoSchedule" EOT } - # Use existing/external launch template - external_lt = { - create_launch_template = false - launch_template_name = aws_launch_template.external.name - launch_template_version = aws_launch_template.external.default_version - } - # Use a custom AMI custom_ami = { ami_type = "AL2_ARM_64" @@ -219,15 +181,15 @@ module "eks" { # See issue https://github.com/awslabs/amazon-eks-ami/issues/844 pre_bootstrap_user_data = <<-EOT - #!/bin/bash - set -ex - cat <<-EOF > /etc/profile.d/bootstrap.sh - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false - export KUBELET_EXTRA_ARGS="--max-pods=110" - EOF - # Source extra environment variables in bootstrap script - sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh + #!/bin/bash + set -ex + cat <<-EOF > /etc/profile.d/bootstrap.sh + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false + export KUBELET_EXTRA_ARGS="--max-pods=110" + EOF + # Source extra environment variables in bootstrap script + sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh EOT } @@ -247,12 +209,12 @@ module "eks" { bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'" pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false EOT post_bootstrap_user_data = <<-EOT - echo "you are free little kubelet!" + echo "you are free little kubelet!" 
EOT capacity_type = "SPOT" @@ -272,13 +234,12 @@ module "eks" { ] update_config = { - max_unavailable_percentage = 50 # or set `max_unavailable` + max_unavailable_percentage = 33 # or set `max_unavailable` } description = "EKS managed node group example launch template" ebs_optimized = true - vpc_security_group_ids = [aws_security_group.additional.id] disable_api_termination = false enable_monitoring = true @@ -291,7 +252,7 @@ module "eks" { iops = 3000 throughput = 150 encrypted = true - kms_key_id = aws_kms_key.ebs.arn + kms_key_id = module.ebs_kms_key.key_id delete_on_termination = true } } @@ -311,34 +272,9 @@ module "eks" { iam_role_tags = { Purpose = "Protector of the kubelet" } - iam_role_additional_policies = [ - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ] - - create_security_group = true - security_group_name = "eks-managed-node-group-complete-example" - security_group_use_name_prefix = false - security_group_description = "EKS managed node group complete example security group" - security_group_rules = { - phoneOut = { - description = "Hello CloudFlare" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - cidr_blocks = ["1.1.1.1/32"] - } - phoneHome = { - description = "Hello cluster" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - source_cluster_security_group = true # bit of reflection lookup - } - } - security_group_tags = { - Purpose = "Protector of the kubelet" + iam_role_additional_policies = { + AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + additional = aws_iam_policy.node_additional.arn } tags = { @@ -350,18 +286,6 @@ module "eks" { tags = local.tags } -# References to resources that do not exist yet when creating a cluster will cause a plan failure due to https://github.com/hashicorp/terraform/issues/4149 -# There are two options users can take -# 1. Create the dependent resources before the cluster => `terraform apply -target and then `terraform apply` -# Note: this is the route users will have to take for adding additonal security groups to nodes since there isn't a separate "security group attachment" resource -# 2. 
For addtional IAM policies, users can attach the policies outside of the cluster definition as demonstrated below -resource "aws_iam_role_policy_attachment" "additional" { - for_each = module.eks.eks_managed_node_groups - - policy_arn = aws_iam_policy.node_additional.arn - role = each.value.iam_role_name -} - ################################################################################ # Supporting Resources ################################################################################ @@ -371,11 +295,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_ipv6 = true assign_ipv6_address_on_creation = true @@ -393,13 +318,11 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags @@ -407,7 +330,7 @@ module "vpc" { module "vpc_cni_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - version = "~> 4.12" + version = "~> 5.0" role_name_prefix = "VPC-CNI-IRSA" attach_vpc_cni_policy = true @@ -423,175 +346,35 @@ module "vpc_cni_irsa" { tags = local.tags } -resource "aws_security_group" "additional" { - name_prefix = "${local.name}-additional" - vpc_id = module.vpc.vpc_id +module "ebs_kms_key" { + source = "terraform-aws-modules/kms/aws" + version = "~> 1.1" - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] - } + description = "Customer managed key to encrypt EKS managed node group volumes" - tags = local.tags -} + # Policy + key_administrators = [ + data.aws_caller_identity.current.arn + ] + key_service_users = [ + # required for the ASG to manage encrypted volumes for nodes + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", + # required for the cluster / persistentvolume-controller to create encrypted PVCs + module.eks.cluster_iam_role_arn, + ] -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" - deletion_window_in_days = 7 - enable_key_rotation = true + # Aliases + aliases = ["eks/${local.name}/ebs"] tags = local.tags } -resource "aws_kms_key" "ebs" { - description = "Customer managed key to encrypt EKS managed node group volumes" - deletion_window_in_days = 7 - policy = data.aws_iam_policy_document.ebs.json -} - -# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes -data "aws_iam_policy_document" "ebs" { - # Copy of default KMS policy that lets you manage it - statement { - sid = "Enable IAM User Permissions" - actions = ["kms:*"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = 
["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } - - # Required for EKS - statement { - sid = "Allow service-linked role use of the CMK" - actions = [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - } - - statement { - sid = "Allow attachment of persistent resources" - actions = ["kms:CreateGrant"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - - condition { - test = "Bool" - variable = "kms:GrantIsForAWSResource" - values = ["true"] - } - } -} - -# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx) -# there are several more options one could set but you probably dont need to modify them -# you can take the default and add your custom AMI and/or custom tags -# -# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI, -# then the default user-data for bootstrapping a cluster is merged in the copy. - -resource "aws_launch_template" "external" { - name_prefix = "external-eks-ex-" - description = "EKS managed node group external launch template" - update_default_version = true - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 100 - volume_type = "gp2" - delete_on_termination = true - } - } - - monitoring { - enabled = true - } - - # Disabling due to https://github.com/hashicorp/terraform-provider-aws/issues/23766 - # network_interfaces { - # associate_public_ip_address = false - # delete_on_termination = true - # } - - # if you want to use a custom AMI - # image_id = var.ami_id - - # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then - # you can add more than the minimum code you see in the template, e.g. 
install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345 - # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151) - # user_data = base64encode(data.template_file.launch_template_userdata.rendered) - - tag_specifications { - resource_type = "instance" - - tags = { - Name = "external_lt" - CustomTag = "Instance custom tag" - } - } - - tag_specifications { - resource_type = "volume" +module "key_pair" { + source = "terraform-aws-modules/key-pair/aws" + version = "~> 2.0" - tags = { - CustomTag = "Volume custom tag" - } - } - - tag_specifications { - resource_type = "network-interface" - - tags = { - CustomTag = "EKS example" - } - } - - tags = { - CustomTag = "Launch template custom tag" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "tls_private_key" "this" { - algorithm = "RSA" -} - -resource "aws_key_pair" "this" { - key_name_prefix = local.name - public_key = tls_private_key.this.public_key_openssh + key_name_prefix = local.name + create_private_key = true tags = local.tags } @@ -617,7 +400,7 @@ resource "aws_security_group" "remote_access" { ipv6_cidr_blocks = ["::/0"] } - tags = local.tags + tags = merge(local.tags, { Name = "${local.name}-remote" }) } resource "aws_iam_policy" "node_additional" { @@ -669,52 +452,3 @@ data "aws_ami" "eks_default_bottlerocket" { values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"] } } - -################################################################################ -# Tags for the ASG to support cluster-autoscaler scale up from 0 -################################################################################ - -locals { - - # We need to lookup K8s taint effect from the AWS API value - taint_effects = { - NO_SCHEDULE = "NoSchedule" - NO_EXECUTE = "NoExecute" - PREFER_NO_SCHEDULE = "PreferNoSchedule" - } - - cluster_autoscaler_label_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for label_name, label_value in coalesce(group.node_group_labels, {}) : "${name}|label|${label_name}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/label/${label_name}", - value = label_value, - } - } - ]...) - - cluster_autoscaler_taint_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for taint in coalesce(group.node_group_taints, []) : "${name}|taint|${taint.key}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/taint/${taint.key}" - value = "${taint.value}:${local.taint_effects[taint.effect]}" - } - } - ]...) 
- - cluster_autoscaler_asg_tags = merge(local.cluster_autoscaler_label_tags, local.cluster_autoscaler_taint_tags) -} - -resource "aws_autoscaling_group_tag" "cluster_autoscaler_label_tags" { - for_each = local.cluster_autoscaler_asg_tags - - autoscaling_group_name = each.value.autoscaling_group - - tag { - key = each.value.key - value = each.value.value - - propagate_at_launch = false - } -} diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf index 67de532779..43334ecc0a 100644 --- a/examples/eks_managed_node_group/outputs.tf +++ b/examples/eks_managed_node_group/outputs.tf @@ -17,16 +17,16 @@ output "cluster_endpoint" { value = module.eks.cluster_endpoint } -output "cluster_name" { - description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_name -} - output "cluster_id" { - description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index fde7af0f23..f2f8625d4b 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" - } - tls = { - source = "hashicorp/tls" - version = ">= 3.0" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 2f9cf7da1b..71a9ee7ad0 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -19,8 +19,8 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | | [helm](#requirement\_helm) | >= 2.7 | | [null](#requirement\_null) | >= 3.0 | @@ -28,7 +28,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | | [helm](#provider\_helm) | >= 2.7 | | [null](#provider\_null) | >= 3.0 | @@ -43,10 +43,11 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Type | |------|------| -| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | | [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | | [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | @@ -68,9 +69,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 0bf15a7442..8eb0c934fe 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -16,11 +16,16 @@ provider "helm" { } } +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" cluster_version = "1.24" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -35,28 +40,29 @@ locals { module "eks" { source = "../.." 
- cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true cluster_addons = { kube-proxy = {} vpc-cni = {} } - cluster_encryption_config = [{ - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] - }] - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets # Fargate profiles use the cluster primary security group so these are not utilized create_cluster_security_group = false create_node_security_group = false + fargate_profile_defaults = { + iam_role_additional_policies = { + additional = aws_iam_policy.additional.arn + } + } + fargate_profiles = { example = { name = "example" @@ -231,11 +237,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true @@ -246,22 +253,29 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags } -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" - deletion_window_in_days = 7 - enable_key_rotation = true - - tags = local.tags +resource "aws_iam_policy" "additional" { + name = "${local.name}-additional" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) } diff --git a/examples/fargate_profile/outputs.tf b/examples/fargate_profile/outputs.tf index 67de532779..43334ecc0a 100644 --- a/examples/fargate_profile/outputs.tf +++ b/examples/fargate_profile/outputs.tf @@ -17,16 +17,16 @@ output "cluster_endpoint" { value = module.eks.cluster_endpoint } -output "cluster_name" { - description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_name -} - output "cluster_id" { - description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index 5128bc99e3..69ef526bd9 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } helm = { source = "hashicorp/helm" diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index 7b865797a4..f5d4e2dcc3 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -51,9 +51,9 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | -| [helm](#requirement\_helm) | >= 2.4 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | +| [helm](#requirement\_helm) | >= 2.7 | | [kubectl](#requirement\_kubectl) | >= 1.14 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | | [null](#requirement\_null) | >= 3.0 | @@ -62,8 +62,9 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | -| [helm](#provider\_helm) | >= 2.4 | +| [aws](#provider\_aws) | >= 4.45 | +| [aws.virginia](#provider\_aws.virginia) | >= 4.45 | +| [helm](#provider\_helm) | >= 2.7 | | [kubectl](#provider\_kubectl) | >= 1.14 | | [null](#provider\_null) | >= 3.0 | @@ -86,6 +87,8 @@ Note that this example may create resources which cost money. Run `terraform des | [kubectl_manifest.karpenter_provisioner](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | | [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_ecrpublic_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecrpublic_authorization_token) | data source | | [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | | [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | @@ -107,9 +110,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. 
Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index 3634132ee2..0a8725e466 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -2,6 +2,11 @@ provider "aws" { region = local.region } +provider "aws" { + region = "us-east-1" + alias = "virginia" +} + provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) @@ -42,11 +47,19 @@ provider "kubectl" { } } +data "aws_availability_zones" "available" {} +data "aws_ecrpublic_authorization_token" "token" { + provider = aws.virginia +} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" cluster_version = "1.24" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -61,17 +74,13 @@ locals { module "eks" { source = "../.." 
- cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true - # Fargate profiles use the cluster primary security group so these are not utilized - create_cluster_security_group = false - create_node_security_group = false + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets manage_aws_auth_configmap = true aws_auth_roles = [ @@ -114,7 +123,6 @@ module "eks" { # Karpenter ################################################################################ - module "karpenter" { source = "../../modules/karpenter" @@ -128,10 +136,12 @@ resource "helm_release" "karpenter" { namespace = "karpenter" create_namespace = true - name = "karpenter" - repository = "oci://public.ecr.aws/karpenter" - chart = "karpenter" - version = "v0.19.1" + name = "karpenter" + repository = "oci://public.ecr.aws/karpenter" + repository_username = data.aws_ecrpublic_authorization_token.token.user_name + repository_password = data.aws_ecrpublic_authorization_token.token.password + chart = "karpenter" + version = "v0.19.3" set { name = "settings.aws.clusterName" @@ -367,24 +377,27 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true enable_dns_hostnames = true + enable_flow_log = true + create_flow_log_cloudwatch_iam_role = true + create_flow_log_cloudwatch_log_group = true + public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 # Tags subnets for Karpenter auto-discovery "karpenter.sh/discovery" = local.name } diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf index 39d418bf37..f0ad50bd6a 100644 --- a/examples/karpenter/outputs.tf +++ b/examples/karpenter/outputs.tf @@ -17,16 +17,16 @@ output "cluster_endpoint" { value = module.eks.cluster_endpoint } -output "cluster_name" { - description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_name -} - output "cluster_id" { - description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf index 92e85aacf6..7ae5250dce 100644 --- a/examples/karpenter/versions.tf +++ b/examples/karpenter/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" @@ -12,7 +12,7 @@ terraform { } helm = { source = "hashicorp/helm" - version = ">= 2.4" + version = ">= 2.7" } kubectl = { source = "gavinbunney/kubectl" diff --git a/examples/outposts/README.md b/examples/outposts/README.md new file mode 100644 index 0000000000..20b60fd4e9 --- /dev/null +++ b/examples/outposts/README.md @@ -0,0 +1,115 @@ +# EKS on Outposts + +Configuration in this directory creates an AWS EKS local cluster on AWS Outposts. + +See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html) for further details. + +Note: This example requires an AWS Outpost to provision. + +## Usage + +To run this example you need to: + +1. Deploy the remote host where the cluster will be provisioned from. The remote host is required since only private access is permitted to clusters created on Outposts. If you have access to the network where Outposts are provisioned (VPN, etc.), you can skip this step: + +```bash +$ cd prerequisites +$ terraform init +$ terraform plan +$ terraform apply +``` + +2. If provisioning using the remote host deployed in step 1, connect to the remote host using SSM. Note: you will need to have the [SSM plugin for the AWS CLI installed](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). You can use the output generated by step 1 to connect: + +```bash +$ aws ssm start-session --region --target +``` + +3. Once connected to the remote host, navigate to the cloned project example directory and deploy the example: + +```bash +$ cd $HOME/terraform-aws-eks/examples/outposts +$ terraform init +$ terraform plan +$ terraform apply +``` + +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.45 | +| [kubernetes](#provider\_kubernetes) | >= 2.10 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | ../..
| n/a | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_storage_class_v1.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class_v1) | resource | +| [aws_outposts_outpost_instance_types.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outpost_instance_types) | data source | +| [aws_outposts_outposts.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outposts) | data source | +| [aws_subnet.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | +| [aws_subnets.lookup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | +| [aws_subnets.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | +| [aws_vpc.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [region](#input\_region) | The AWS region to deploy into (e.g. us-east-1) | `string` | `"us-west-2"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | +| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | +| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | +| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | +| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | +| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | +| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | +| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | +| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | +| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | +| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | +| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | +| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | +| [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | +| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | +| [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | +| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | +| [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key | +| [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key | +| [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key | +| [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group | +| [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group | +| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | +| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` | +| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created | +| [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups | + diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf new file mode 100644 index 0000000000..50cbc9586d --- /dev/null +++ b/examples/outposts/main.tf @@ -0,0 +1,152 @@ +provider "aws" { + region = var.region +} + +provider "kubernetes" { + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # Note: `cluster_id` is used with Outposts for auth + args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] + } +} + +locals { + name = "ex-${basename(path.cwd)}" + cluster_version = "1.21" # Required by EKS on Outposts + + outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) + instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." 
+ + cluster_name = local.name + cluster_version = local.cluster_version + + cluster_endpoint_public_access = false # Not available on Outposts + cluster_endpoint_private_access = true + + vpc_id = data.aws_vpc.this.id + subnet_ids = data.aws_subnets.this.ids + + outpost_config = { + control_plane_instance_type = local.instance_type + outpost_arns = [local.outpost_arn] + } + + # Local clusters will automatically add the node group IAM role to the aws-auth configmap + manage_aws_auth_configmap = true + + # Extend cluster security group rules + cluster_security_group_additional_rules = { + ingress_vpc_https = { + description = "Remote host to control plane" + protocol = "tcp" + from_port = 443 + to_port = 443 + type = "ingress" + cidr_blocks = [data.aws_vpc.this.cidr_block] + } + } + + self_managed_node_group_defaults = { + attach_cluster_primary_security_group = true + + iam_role_additional_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + } + + self_managed_node_groups = { + outpost = { + name = local.name + + min_size = 2 + max_size = 5 + desired_size = 3 + instance_type = local.instance_type + + # Additional information is required to join local clusters to EKS + bootstrap_extra_args = <<-EOT + --enable-local-outpost true --cluster-id ${module.eks.cluster_id} --container-runtime containerd + EOT + } + } + + tags = local.tags +} + +resource "kubernetes_storage_class_v1" "this" { + metadata { + name = "ebs-sc" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + + storage_provisioner = "ebs.csi.aws.com" + volume_binding_mode = "WaitForFirstConsumer" + allow_volume_expansion = true + + parameters = { + type = "gp2" + encrypted = "true" + } +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_outposts_outposts" "this" {} + +data "aws_outposts_outpost_instance_types" "this" { + arn = local.outpost_arn +} + +# This just grabs the first Outpost and returns its subnets +data "aws_subnets" "lookup" { + filter { + name = "outpost-arn" + values = [local.outpost_arn] + } +} + +# This grabs a single subnet to reverse-lookup those that belong to the same VPC +# This is what's used for the cluster +data "aws_subnet" "this" { + id = element(tolist(data.aws_subnets.lookup.ids), 0) +} + +# These are subnets for the Outpost and restricted to the same VPC +# This is what's used for the cluster +data "aws_subnets" "this" { + filter { + name = "outpost-arn" + values = [local.outpost_arn] + } + + filter { + name = "vpc-id" + values = [data.aws_subnet.this.vpc_id] + } +} + +data "aws_vpc" "this" { + id = data.aws_subnet.this.vpc_id +} diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf new file mode 100644 index 0000000000..43334ecc0a --- /dev/null +++ b/examples/outposts/outputs.tf @@ -0,0 +1,211 @@ +################################################################################ +# Cluster +################################################################################ + +output "cluster_arn" { + description = "The Amazon Resource Name (ARN) of the cluster" + value = module.eks.cluster_arn +} + +output "cluster_certificate_authority_data" { + description = "Base64 encoded certificate data required to communicate with the cluster" + value = module.eks.cluster_certificate_authority_data +} + +output "cluster_endpoint" { + description = "Endpoint for your
Kubernetes API server" + value = module.eks.cluster_endpoint +} + +output "cluster_id" { + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" + value = module.eks.cluster_id +} + +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + +output "cluster_oidc_issuer_url" { + description = "The URL on the EKS cluster for the OpenID Connect identity provider" + value = module.eks.cluster_oidc_issuer_url +} + +output "cluster_platform_version" { + description = "Platform version for the cluster" + value = module.eks.cluster_platform_version +} + +output "cluster_status" { + description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" + value = module.eks.cluster_status +} + +output "cluster_primary_security_group_id" { + description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" + value = module.eks.cluster_primary_security_group_id +} + +################################################################################ +# KMS Key +################################################################################ + +output "kms_key_arn" { + description = "The Amazon Resource Name (ARN) of the key" + value = module.eks.kms_key_arn +} + +output "kms_key_id" { + description = "The globally unique identifier for the key" + value = module.eks.kms_key_id +} + +output "kms_key_policy" { + description = "The IAM resource policy set on the key" + value = module.eks.kms_key_policy +} + +################################################################################ +# Security Group +################################################################################ + +output "cluster_security_group_arn" { + description = "Amazon Resource Name (ARN) of the cluster security group" + value = module.eks.cluster_security_group_arn +} + +output "cluster_security_group_id" { + description = "ID of the cluster security group" + value = module.eks.cluster_security_group_id +} + +################################################################################ +# Node Security Group +################################################################################ + +output "node_security_group_arn" { + description = "Amazon Resource Name (ARN) of the node shared security group" + value = module.eks.node_security_group_arn +} + +output "node_security_group_id" { + description = "ID of the node shared security group" + value = module.eks.node_security_group_id +} + +################################################################################ +# IRSA +################################################################################ + +output "oidc_provider" { + description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" + value = module.eks.oidc_provider +} + +output "oidc_provider_arn" { + description = "The ARN of the OIDC Provider if `enable_irsa = true`" + value = module.eks.oidc_provider_arn +} + +output "cluster_tls_certificate_sha1_fingerprint" { + description = "The SHA1 fingerprint of the public key of the cluster's certificate" + value = module.eks.cluster_tls_certificate_sha1_fingerprint +} + +################################################################################ +# IAM Role 
+################################################################################ + +output "cluster_iam_role_name" { + description = "IAM role name of the EKS cluster" + value = module.eks.cluster_iam_role_name +} + +output "cluster_iam_role_arn" { + description = "IAM role ARN of the EKS cluster" + value = module.eks.cluster_iam_role_arn +} + +output "cluster_iam_role_unique_id" { + description = "Stable and unique string identifying the IAM role" + value = module.eks.cluster_iam_role_unique_id +} + +################################################################################ +# EKS Addons +################################################################################ + +output "cluster_addons" { + description = "Map of attribute maps for all EKS cluster addons enabled" + value = module.eks.cluster_addons +} + +################################################################################ +# EKS Identity Provider +################################################################################ + +output "cluster_identity_providers" { + description = "Map of attribute maps for all EKS identity providers enabled" + value = module.eks.cluster_identity_providers +} + +################################################################################ +# CloudWatch Log Group +################################################################################ + +output "cloudwatch_log_group_name" { + description = "Name of cloudwatch log group created" + value = module.eks.cloudwatch_log_group_name +} + +output "cloudwatch_log_group_arn" { + description = "Arn of cloudwatch log group created" + value = module.eks.cloudwatch_log_group_arn +} + +################################################################################ +# Fargate Profile +################################################################################ + +output "fargate_profiles" { + description = "Map of attribute maps for all EKS Fargate Profiles created" + value = module.eks.fargate_profiles +} + +################################################################################ +# EKS Managed Node Group +################################################################################ + +output "eks_managed_node_groups" { + description = "Map of attribute maps for all EKS managed node groups created" + value = module.eks.eks_managed_node_groups +} + +output "eks_managed_node_groups_autoscaling_group_names" { + description = "List of the autoscaling group names created by EKS managed node groups" + value = module.eks.eks_managed_node_groups_autoscaling_group_names +} + +################################################################################ +# Self Managed Node Group +################################################################################ + +output "self_managed_node_groups" { + description = "Map of attribute maps for all self managed node groups created" + value = module.eks.self_managed_node_groups +} + +output "self_managed_node_groups_autoscaling_group_names" { + description = "List of the autoscaling group names created by self-managed node groups" + value = module.eks.self_managed_node_groups_autoscaling_group_names +} + +################################################################################ +# Additional +################################################################################ + +output "aws_auth_configmap_yaml" { + description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" + value = 
module.eks.aws_auth_configmap_yaml +} diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf new file mode 100644 index 0000000000..e49d7bdbb5 --- /dev/null +++ b/examples/outposts/prerequisites/main.tf @@ -0,0 +1,150 @@ +provider "aws" { + region = var.region +} + +locals { + name = "ex-${basename(path.cwd)}" + + terraform_version = "1.3.6" + + outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) + instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Pre-Requisites +################################################################################ + +module "ssm_bastion_ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + version = "~> 4.2" + + name = "${local.name}-bastion" + + create_iam_instance_profile = true + iam_role_policies = { + AdministratorAccess = "arn:aws:iam::aws:policy/AdministratorAccess" + } + + instance_type = local.instance_type + + user_data = <<-EOT + #!/bin/bash + + # Add ssm-user since it won't exist until first login + adduser -m ssm-user + tee /etc/sudoers.d/ssm-agent-users <<'EOF' + # User rules for ssm-user + ssm-user ALL=(ALL) NOPASSWD:ALL + EOF + chmod 440 /etc/sudoers.d/ssm-agent-users + + cd /home/ssm-user + + # Install git to clone repo + yum install git -y + + # Install Terraform + curl -sSO https://releases.hashicorp.com/terraform/${local.terraform_version}/terraform_${local.terraform_version}_linux_amd64.zip + sudo unzip -qq terraform_${local.terraform_version}_linux_amd64.zip terraform -d /usr/bin/ + rm terraform_${local.terraform_version}_linux_amd64.zip 2> /dev/null + + # Install kubectl + curl -LO https://dl.k8s.io/release/v1.21.0/bin/linux/amd64/kubectl + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + + # Remove default awscli which is v1 - we want latest v2 + yum remove awscli -y + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + ./aws/install + + # Clone repo + git clone https://github.com/bryantbiggs/terraform-aws-eks.git \ + && cd /home/ssm-user/terraform-aws-eks \ + && git checkout refactor/v19 + + chown -R ssm-user:ssm-user /home/ssm-user/ + EOT + + vpc_security_group_ids = [module.bastion_security_group.security_group_id] + subnet_id = element(data.aws_subnets.this.ids, 0) + + tags = local.tags +} + +module "bastion_security_group" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 4.0" + + name = "${local.name}-bastion" + description = "Security group to allow provisioning ${local.name} EKS local cluster on Outposts" + vpc_id = data.aws_vpc.this.id + + ingress_with_cidr_blocks = [ + { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = data.aws_vpc.this.cidr_block + }, + ] + egress_with_cidr_blocks = [ + { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = "0.0.0.0/0" + }, + ] + + tags = local.tags +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_outposts_outposts" "this" {} + +data "aws_outposts_outpost_instance_types" "this" { + arn = local.outpost_arn +} + +# This just grabs the first Outpost and returns its subnets +data "aws_subnets" "lookup" { + 
filter { + name = "outpost-arn" + values = [local.outpost_arn] + } +} + +# This grabs a single subnet to reverse-lookup those that belong to the same VPC +# This is what's used for the cluster +data "aws_subnet" "this" { + id = element(tolist(data.aws_subnets.lookup.ids), 0) +} + +# These are subnets for the Outpost and restricted to the same VPC +# This is what's used for the cluster +data "aws_subnets" "this" { + filter { + name = "outpost-arn" + values = [local.outpost_arn] + } + + filter { + name = "vpc-id" + values = [data.aws_subnet.this.vpc_id] + } +} + +data "aws_vpc" "this" { + id = data.aws_subnet.this.vpc_id +} diff --git a/examples/outposts/prerequisites/outputs.tf b/examples/outposts/prerequisites/outputs.tf new file mode 100644 index 0000000000..f2ff81ab70 --- /dev/null +++ b/examples/outposts/prerequisites/outputs.tf @@ -0,0 +1,4 @@ +output "ssm_start_session" { + description = "SSM start-session command to connect to the remote host created" + value = "aws ssm start-session --region ${var.region} --target ${module.ssm_bastion_ec2.id}" +}
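Because the `ssm_start_session` output above renders the full command string, one convenient pattern (a usage suggestion, not part of the example) is to hand it straight to the shell from the `prerequisites` directory:

```bash
# Runs the rendered "aws ssm start-session ..." command emitted by the output above
$ eval "$(terraform output -raw ssm_start_session)"
```

diff --git a/examples/outposts/prerequisites/variables.tf b/examples/outposts/prerequisites/variables.tf new file mode 100644 index 0000000000..47945c8501 --- /dev/null +++ b/examples/outposts/prerequisites/variables.tf @@ -0,0 +1,5 @@ +variable "region" { + description = "The AWS region to deploy into (e.g. us-east-1)" + type = string + default = "us-west-2" +} diff --git a/examples/outposts/prerequisites/versions.tf b/examples/outposts/prerequisites/versions.tf new file mode 100644 index 0000000000..5f058b4c11 --- /dev/null +++ b/examples/outposts/prerequisites/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.34" + } + } +} diff --git a/examples/outposts/variables.tf b/examples/outposts/variables.tf new file mode 100644 index 0000000000..47945c8501 --- /dev/null +++ b/examples/outposts/variables.tf @@ -0,0 +1,5 @@ +variable "region" { + description = "The AWS region to deploy into (e.g. us-east-1)" + type = string + default = "us-west-2" +} diff --git a/examples/outposts/versions.tf b/examples/outposts/versions.tf new file mode 100644 index 0000000000..f2f8625d4b --- /dev/null +++ b/examples/outposts/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.45" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10" + } + } +} diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index 9608cdb8cd..97deaa59ab 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -25,23 +25,23 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | -| [tls](#provider\_tls) | >= 3.0 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules | Name | Source | Version | |------|--------|---------| +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.1 | | [eks](#module\_eks) | ../..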
| n/a | +| [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources @@ -49,15 +49,11 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Type | |------|------| | [aws_ec2_capacity_reservation.targeted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_capacity_reservation) | resource | -| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | -| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | -| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | -| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | +| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs @@ -77,9 +73,9 @@ No inputs. | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | | [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 27e82eebb3..fe7b5d85c8 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -14,11 +14,17 @@ provider "kubernetes" { } } +data "aws_caller_identity" "current" {} +data "aws_availability_zones" "available" {} + locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" cluster_version = "1.24" region = "eu-west-1" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + tags = { Example = local.name GithubRepo = "terraform-aws-eks" @@ -26,8 +32,6 @@ locals { } } -data "aws_caller_identity" "current" {} - ################################################################################ # EKS Module ################################################################################ @@ -35,69 +39,31 @@ data "aws_caller_identity" "current" {} module "eks" { source = "../.." - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = local.cluster_version + cluster_endpoint_public_access = true cluster_addons = { coredns = { - resolve_conflicts = "OVERWRITE" + most_recent = true + } + kube-proxy = { + most_recent = true } - kube-proxy = {} vpc-cni = { - resolve_conflicts = "OVERWRITE" + most_recent = true } } - cluster_encryption_config = [{ - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] - }] - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + control_plane_subnet_ids = module.vpc.intra_subnets # Self managed node groups will not automatically create the aws-auth configmap so we need to create_aws_auth_configmap = true manage_aws_auth_configmap = true - # Extend cluster security group rules - cluster_security_group_additional_rules = { - egress_nodes_ephemeral_ports_tcp = { - description = "To node 1025-65535" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "egress" - source_node_security_group = true - } - } - - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - } - self_managed_node_group_defaults = { - create_security_group = false - # enable discovery of autoscaling groups by cluster-autoscaler autoscaling_group_tags = { "k8s.io/cluster-autoscaler/enabled" : true, @@ -117,24 +83,31 @@ module "eks" { ami_id = data.aws_ami.eks_default_bottlerocket.id instance_type = "m5.large" desired_size = 2 - key_name = aws_key_pair.this.key_name - - iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"] + key_name = module.key_pair.key_pair_name bootstrap_extra_args = <<-EOT - # The admin host container provides SSH access and runs with "superpowers". - # It is disabled by default, but can be disabled explicitly. - [settings.host-containers.admin] - enabled = false - - # The control host container provides out-of-band access via SSM. 
- # It is enabled by default, and can be disabled if you do not expect to use SSM. - # This could leave you with no way to access the API and change settings on an existing node! - [settings.host-containers.control] - enabled = true - - [settings.kubernetes.node-labels] - ingress = "allowed" + # The admin host container provides SSH access and runs with "superpowers". + # It is disabled by default, but can be disabled explicitly. + [settings.host-containers.admin] + enabled = false + + # The control host container provides out-of-band access via SSM. + # It is enabled by default, and can be disabled if you do not expect to use SSM. + # This could leave you with no way to access the API and change settings on an existing node! + [settings.host-containers.control] + enabled = true + + # extra args added + [settings.kernel] + lockdown = "integrity" + + [settings.kubernetes.node-labels] + label1 = "foo" + label2 = "bar" + + [settings.kubernetes.node-taints] + dedicated = "experimental:PreferNoSchedule" + special = "true:NoSchedule" EOT } @@ -177,15 +150,14 @@ module "eks" { instance_type = "c5n.9xlarge" post_bootstrap_user_data = <<-EOT - - # Install EFA - curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz - tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer - ./efa_installer.sh -y --minimal - fi_info -p efa -t FI_EP_RDM - - # Disable ptrace - sysctl -w kernel.yama.ptrace_scope=0 + # Install EFA + curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz + tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer + ./efa_installer.sh -y --minimal + fi_info -p efa -t FI_EP_RDM + + # Disable ptrace + sysctl -w kernel.yama.ptrace_scope=0 EOT network_interfaces = [ @@ -214,12 +186,12 @@ module "eks" { bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false EOT post_bootstrap_user_data = <<-EOT - echo "you are free little kubelet!" + echo "you are free little kubelet!" 
EOT instance_type = "m6i.large" @@ -228,9 +200,8 @@ launch_template_use_name_prefix = true launch_template_description = "Self managed node group example launch template" - ebs_optimized = true - vpc_security_group_ids = [aws_security_group.additional.id] - enable_monitoring = true + ebs_optimized = true + enable_monitoring = true block_device_mappings = { xvda = { @@ -241,7 +212,7 @@ iops = 3000 throughput = 150 encrypted = true - kms_key_id = aws_kms_key.ebs.arn + kms_key_id = module.ebs_kms_key.key_arn delete_on_termination = true } } @@ -267,34 +238,9 @@ iam_role_tags = { Purpose = "Protector of the kubelet" } - iam_role_additional_policies = [ - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ] - - create_security_group = true - security_group_name = "self-managed-node-group-complete-example" - security_group_use_name_prefix = false - security_group_description = "Self managed node group complete example security group" - security_group_rules = { - phoneOut = { - description = "Hello CloudFlare" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - cidr_blocks = ["1.1.1.1/32"] - } - phoneHome = { - description = "Hello cluster" - protocol = "udp" - from_port = 53 - to_port = 53 - type = "egress" - source_cluster_security_group = true # bit of reflection lookup - } - } - security_group_tags = { - Purpose = "Protector of the kubelet" + iam_role_additional_policies = { + AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + additional = aws_iam_policy.additional.arn } timeouts = { @@ -321,11 +267,12 @@ module "vpc" { version = "~> 3.0" name = local.name - cidr = "10.0.0.0/16" + cidr = local.vpc_cidr - azs = ["${local.region}a", "${local.region}b", "${local.region}c"] - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] enable_nat_gateway = true single_nat_gateway = true @@ -336,44 +283,16 @@ module "vpc" { create_flow_log_cloudwatch_log_group = true public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} - -resource "aws_security_group" "additional" { - name_prefix = "${local.name}-additional" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags } -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" - deletion_window_in_days = 7 - enable_key_rotation = true - - tags = local.tags -} - data "aws_ami" "eks_default" { most_recent = true owners = ["amazon"] @@ -394,19 +313,37 @@ data "aws_ami" "eks_default_bottlerocket" { } } -resource "tls_private_key" "this" { - algorithm = "RSA" -} +module "key_pair" { + source = "terraform-aws-modules/key-pair/aws" + version = "~> 2.0" -resource "aws_key_pair" "this" { - key_name = local.name - public_key = tls_private_key.this.public_key_openssh + key_name_prefix = local.name +
create_private_key = true + + tags = local.tags } -resource "aws_kms_key" "ebs" { - description = "Customer managed key to encrypt self managed node group volumes" - deletion_window_in_days = 7 - policy = data.aws_iam_policy_document.ebs.json +module "ebs_kms_key" { + source = "terraform-aws-modules/kms/aws" + version = "~> 1.1" + + description = "Customer managed key to encrypt self managed node group volumes" + + # Policy + key_administrators = [ + data.aws_caller_identity.current.arn + ] + key_service_users = [ + # required for the ASG to manage encrypted volumes for nodes + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", + # required for the cluster / persistentvolume-controller to create encrypted PVCs + module.eks.cluster_iam_role_arn, + ] + + # Aliases + aliases = ["eks/${local.name}/ebs"] + + tags = local.tags } resource "aws_ec2_capacity_reservation" "targeted" { @@ -417,58 +354,22 @@ instance_match_criteria = "targeted" } -# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes -data "aws_iam_policy_document" "ebs" { - # Copy of default KMS policy that lets you manage it - statement { - sid = "Enable IAM User Permissions" - actions = ["kms:*"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } +resource "aws_iam_policy" "additional" { + name = "${local.name}-additional" + description = "Example usage of node additional policy" - # Required for EKS - statement { - sid = "Allow service-linked role use of the CMK" - actions = [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "ec2:Describe*", + ] + Effect = "Allow" + Resource = "*" + }, ] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - } + }) - statement { - sid = "Allow attachment of persistent resources" - actions = ["kms:CreateGrant"] - resources = ["*"] - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - - condition { - test = "Bool" - variable = "kms:GrantIsForAWSResource" - values = ["true"] - } - } + tags = local.tags } diff --git a/examples/self_managed_node_group/outputs.tf b/examples/self_managed_node_group/outputs.tf index 67de532779..43334ecc0a 100644 --- a/examples/self_managed_node_group/outputs.tf +++ b/examples/self_managed_node_group/outputs.tf @@ -17,16 +17,16 @@ output "cluster_endpoint" { value = module.eks.cluster_endpoint } -output "cluster_name" { - description = "The name of the EKS cluster.
Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_name -} - output "cluster_id" { - description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready" + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" value = module.eks.cluster_id } +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index fde7af0f23..f2f8625d4b 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" - } - tls = { - source = "hashicorp/tls" - version = ">= 3.0" + version = ">= 4.45" } kubernetes = { source = "hashicorp/kubernetes" diff --git a/examples/user_data/README.md b/examples/user_data/README.md index 54cd9ec72b..cea7dce755 100644 --- a/examples/user_data/README.md +++ b/examples/user_data/README.md @@ -17,8 +17,7 @@ $ terraform apply | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | ## Providers diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf index 5c5b1266cf..cb565ba04c 100644 --- a/examples/user_data/main.tf +++ b/examples/user_data/main.tf @@ -19,7 +19,7 @@ module "eks_mng_linux_additional" { source = "../../modules/_user_data" pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" + export CONTAINER_RUNTIME="containerd" EOT } @@ -34,14 +34,14 @@ module "eks_mng_linux_custom_ami" { enable_bootstrap_user_data = true pre_bootstrap_user_data = <<-EOT - export CONTAINER_RUNTIME="containerd" - export USE_MAX_PODS=false + export CONTAINER_RUNTIME="containerd" + export USE_MAX_PODS=false EOT bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20 --instance-type t3a.large'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -56,14 +56,14 @@ module "eks_mng_linux_custom_template" { user_data_template_path = "${path.module}/templates/linux_custom.tpl" pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -80,9 +80,9 @@ module "eks_mng_bottlerocket_additional" { platform = "bottlerocket" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -98,9 +98,9 @@ module "eks_mng_bottlerocket_custom_ami" { enable_bootstrap_user_data = true bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -116,9 +116,9 @@ module "eks_mng_bottlerocket_custom_template" { user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl" bootstrap_extra_args = 
<<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -140,14 +140,14 @@ module "self_mng_linux_bootstrap" { cluster_auth_base64 = local.cluster_auth_base64 pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -164,14 +164,14 @@ module "self_mng_linux_custom_template" { user_data_template_path = "${path.module}/templates/linux_custom.tpl" pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar + echo "foo" + export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT - echo "All done" + echo "All done" EOT } @@ -197,9 +197,9 @@ module "self_mng_bottlerocket_bootstrap" { cluster_auth_base64 = local.cluster_auth_base64 bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -218,9 +218,9 @@ module "self_mng_bottlerocket_custom_template" { user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl" bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" + # extra args added + [settings.kernel] + lockdown = "integrity" EOT } @@ -246,13 +246,13 @@ module "self_mng_windows_bootstrap" { cluster_auth_base64 = local.cluster_auth_base64 pre_bootstrap_user_data = <<-EOT - [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT # I don't know if this is the right way on WindowsOS, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT - [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT } @@ -271,12 +271,12 @@ module "self_mng_windows_custom_template" { user_data_template_path = "${path.module}/templates/windows_custom.tpl" pre_bootstrap_user_data = <<-EOT - [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT # I don't know if this is the right way on WindowsOS, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT - [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT } diff --git a/examples/user_data/versions.tf b/examples/user_data/versions.tf index 22e8d7265f..7117131f4c 100644 --- a/examples/user_data/versions.tf +++ b/examples/user_data/versions.tf @@ -1,10 +1,3 @@ terraform { - required_version = ">= 0.13.1" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - } + required_version = ">= 1.0" } diff --git a/main.tf b/main.tf index 3ed4edaacf..9193031348 100644 --- a/main.tf +++ b/main.tf @@ -1,11 +1,13 @@ data "aws_partition" "current" {} data "aws_caller_identity" "current" {} -data "aws_default_tags" "current" {} locals { create = var.create && var.putin_khuylo cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn) + + create_outposts_local_cluster = length(var.outpost_config) > 
0 + enable_cluster_encryption_config = length(var.cluster_encryption_config) > 0 && !local.create_outposts_local_cluster } ################################################################################ @@ -25,16 +27,32 @@ resource "aws_eks_cluster" "this" { subnet_ids = coalescelist(var.control_plane_subnet_ids, var.subnet_ids) endpoint_private_access = var.cluster_endpoint_private_access endpoint_public_access = var.cluster_endpoint_public_access - public_access_cidrs = var.cluster_endpoint_public_access_cidrs + public_access_cidrs = var.cluster_endpoint_public_access ? var.cluster_endpoint_public_access_cidrs : [] } - kubernetes_network_config { - ip_family = var.cluster_ip_family - service_ipv4_cidr = var.cluster_service_ipv4_cidr + dynamic "kubernetes_network_config" { + # Not valid on Outposts + for_each = local.create_outposts_local_cluster ? [] : [1] + + content { + ip_family = var.cluster_ip_family + service_ipv4_cidr = var.cluster_service_ipv4_cidr + service_ipv6_cidr = var.cluster_service_ipv6_cidr + } + } + + dynamic "outpost_config" { + for_each = local.create_outposts_local_cluster ? [var.outpost_config] : [] + + content { + control_plane_instance_type = outpost_config.value.control_plane_instance_type + outpost_arns = outpost_config.value.outpost_arns + } } dynamic "encryption_config" { - for_each = toset(var.cluster_encryption_config) + # Not available on Outposts + for_each = local.enable_cluster_encryption_config ? [var.cluster_encryption_config] : [] content { provider { @@ -67,9 +85,8 @@ resource "aws_ec2_tag" "cluster_primary_security_group" { # This should not affect the name of the cluster primary security group # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006 # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008 - # `aws_default_tags` is merged in to "dedupe" tags and stabilize tag updates - for_each = { for k, v in merge(var.tags, var.cluster_tags, data.aws_default_tags.current.tags) : - k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags + for_each = { for k, v in merge(var.tags, var.cluster_tags) : + k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags && v != null } resource_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id @@ -93,9 +110,9 @@ resource "aws_cloudwatch_log_group" "this" { module "kms" { source = "terraform-aws-modules/kms/aws" - version = "1.0.2" # Note - be mindful of Terraform/provider version compatibility between modules + version = "1.1.0" # Note - be mindful of Terraform/provider version compatibility between modules - create = local.create && var.create_kms_key + create = local.create && var.create_kms_key && !local.create_outposts_local_cluster # not valid on Outposts description = coalesce(var.kms_key_description, "${var.cluster_name} cluster encryption key") key_usage = "ENCRYPT_DECRYPT" @@ -129,7 +146,7 @@ locals { cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id # Do not add rules to node security group if the module is not creating it - cluster_security_group_rules = local.create_node_sg ? 
{ + cluster_security_group_rules = { for k, v in { ingress_nodes_443 = { description = "Node groups to cluster API" protocol = "tcp" @@ -138,23 +155,7 @@ type = "ingress" source_node_security_group = true } - egress_nodes_443 = { - description = "Cluster API to node groups" - protocol = "tcp" - from_port = 443 - to_port = 443 - type = "egress" - source_node_security_group = true - } - egress_nodes_kubelet = { - description = "Cluster API to node kubelets" - protocol = "tcp" - from_port = 10250 - to_port = 10250 - type = "egress" - source_node_security_group = true - } - } : {} + } : k => v if local.create_node_sg } } resource "aws_security_group" "cluster" { @@ -177,7 +178,10 @@ } resource "aws_security_group_rule" "cluster" { - for_each = { for k, v in merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : k => v if local.create_cluster_sg } + for_each = { for k, v in merge( + local.cluster_security_group_rules, + var.cluster_security_group_additional_rules + ) : k => v if local.create_cluster_sg } # Required security_group_id = aws_security_group.cluster[0].id @@ -187,15 +191,13 @@ type = each.value.type # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_node_security_group, false) ? local.node_security_group_id : null - ) + description = lookup(each.value, "description", null) + cidr_blocks = lookup(each.value, "cidr_blocks", null) + ipv6_cidr_blocks = lookup(each.value, "ipv6_cidr_blocks", null) + prefix_list_ids = lookup(each.value, "prefix_list_ids", []) + self = lookup(each.value, "self", null) + source_security_group_id = try(each.value.source_node_security_group, false) ? local.node_security_group_id : lookup(each.value, "source_security_group_id", null) }
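For context on how those optional attributes are consumed, here is a caller-side sketch (illustrative only; the rule names and the security group ID are hypothetical placeholders, and other required module arguments are elided). A rule with `source_node_security_group = true` resolves to the module's node security group, while an explicit `source_security_group_id` is used as given:

```hcl
# Hypothetical input for cluster_security_group_additional_rules
module "eks" {
  source = "terraform-aws-modules/eks/aws"
  # ... other required arguments omitted for brevity ...

  cluster_security_group_additional_rules = {
    ingress_bastion_https = {
      description              = "Bastion to cluster API"
      protocol                 = "tcp"
      from_port                = 443
      to_port                  = 443
      type                     = "ingress"
      source_security_group_id = "sg-0123456789abcdef0" # placeholder; used as given
    }
    egress_nodes_ephemeral = {
      description                = "Cluster API to nodes on ephemeral ports"
      protocol                   = "tcp"
      from_port                  = 1025
      to_port                    = 65535
      type                       = "egress"
      source_node_security_group = true # resolved to the module's node security group
    }
  }
}
```

################################################################################ @@ -204,13 +206,15 @@ resource "aws_security_group_rule" "cluster" { ################################################################################ data "tls_certificate" "this" { - count = local.create && var.enable_irsa ? 1 : 0 + # Not available on outposts + count = local.create && var.enable_irsa && !local.create_outposts_local_cluster ? 1 : 0 url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer } resource "aws_iam_openid_connect_provider" "oidc_provider" { - count = local.create && var.enable_irsa ? 1 : 0 + # Not available on outposts + count = local.create && var.enable_irsa && !local.create_outposts_local_cluster ?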
1 : 0 client_id_list = distinct(compact(concat(["sts.${local.dns_suffix}"], var.openid_connect_audiences))) thumbprint_list = concat(data.tls_certificate.this[0].certificates[*].sha1_fingerprint, var.custom_oidc_thumbprints) @@ -227,9 +231,9 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" { ################################################################################ locals { - create_iam_role = local.create && var.create_iam_role - iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster") - policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" + create_iam_role = local.create && var.create_iam_role + iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster") + iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, "${local.iam_role_name}-ClusterEncryption") @@ -249,6 +253,17 @@ data "aws_iam_policy_document" "assume_role_policy" { type = "Service" identifiers = ["eks.${local.dns_suffix}"] } + + dynamic "principals" { + for_each = local.create_outposts_local_cluster ? [1] : [] + + content { + type = "Service" + identifiers = [ + "ec2.${local.dns_suffix}", + ] + } + } } } @@ -292,10 +307,17 @@ resource "aws_iam_role" "this" { # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html resource "aws_iam_role_policy_attachment" "this" { - for_each = local.create_iam_role ? toset(compact(distinct(concat([ - "${local.policy_arn_prefix}/AmazonEKSClusterPolicy", - "${local.policy_arn_prefix}/AmazonEKSVPCResourceController", - ], var.iam_role_additional_policies)))) : toset([]) + for_each = { for k, v in { + AmazonEKSClusterPolicy = local.create_outposts_local_cluster ? "${local.iam_role_policy_prefix}/AmazonEKSLocalOutpostClusterPolicy" : "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", + AmazonEKSVPCResourceController = "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", + } : k => v if local.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name @@ -303,14 +325,16 @@ resource "aws_iam_role_policy_attachment" "this" { # Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply` resource "aws_iam_role_policy_attachment" "cluster_encryption" { - count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0 + # Encryption config not available on Outposts + count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0 policy_arn = aws_iam_policy.cluster_encryption[0].arn role = aws_iam_role.this[0].name } resource "aws_iam_policy" "cluster_encryption" { - count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0 + # Encryption config not available on Outposts + count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0 name = var.cluster_encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name name_prefix = var.cluster_encryption_policy_use_name_prefix ? 
@@ -341,14 +365,22 @@ ################################################################################ resource "aws_eks_addon" "this" { - for_each = { for k, v in var.cluster_addons : k => v if local.create } + # Not supported on outposts + for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster } cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = lookup(each.value, "addon_version", null) - resolve_conflicts = lookup(each.value, "resolve_conflicts", null) - service_account_role_arn = lookup(each.value, "service_account_role_arn", null) + addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version) + preserve = try(each.value.preserve, null) + resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") + service_account_role_arn = try(each.value.service_account_role_arn, null) + + timeouts { + create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null) + update = try(each.value.timeouts.update, var.cluster_addons_timeouts.update, null) + delete = try(each.value.timeouts.delete, var.cluster_addons_timeouts.delete, null) + } depends_on = [ module.fargate_profile, @@ -359,13 +391,21 @@ resource "aws_eks_addon" "this" { tags = var.tags } +data "aws_eks_addon_version" "this" { + for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster } + + addon_name = try(each.value.name, each.key) + kubernetes_version = coalesce(var.cluster_version, aws_eks_cluster.this[0].version) + most_recent = try(each.value.most_recent, null) +} + ################################################################################ # EKS Identity Provider # Note - this is different from IRSA ################################################################################ resource "aws_eks_identity_provider_config" "this" { - for_each = { for k, v in var.cluster_identity_providers : k => v if local.create } + for_each = { for k, v in var.cluster_identity_providers : k => v if local.create && !local.create_outposts_local_cluster } cluster_name = aws_eks_cluster.this[0].name diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md index 87da77b447..0853fd9e1a 100644 --- a/modules/_user_data/README.md +++ b/modules/_user_data/README.md @@ -9,7 +9,7 @@ See [`examples/user_data/`](https://github.com/terraform-aws-modules/terraform-a | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [terraform](#requirement\_terraform) | >= 1.0 | | [cloudinit](#requirement\_cloudinit) | >= 2.0 | ## Providers diff --git a/modules/_user_data/outputs.tf b/modules/_user_data/outputs.tf index c2a569b05b..075801b233 100644 --- a/modules/_user_data/outputs.tf +++ b/modules/_user_data/outputs.tf @@ -1,4 +1,4 @@ output "user_data" { description = "Base64 encoded user data rendered for the provided inputs" - value = try(local.platform[var.platform].user_data, "") + value = try(local.platform[var.platform].user_data, null) } diff --git 
a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf index e293dc67ce..2dbd12cdc0 100644 --- a/modules/_user_data/versions.tf +++ b/modules/_user_data/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { cloudinit = { diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index bf8d6de605..d4d4cc9b40 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -18,7 +18,18 @@ module "eks_managed_node_group" { // The following variables are necessary if you decide to use the module outside of the parent EKS module context. // Without it, the security groups of the nodes are empty and thus won't join the cluster. cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id - cluster_security_group_id = module.eks.node_security_group_id + cluster_security_group_id = module.eks.node_security_group_id + + // Note: `disk_size`, and `remote_access` can only be set when using the EKS managed node group default launch template + // This module defaults to providing a custom launch template to allow for custom security groups, tag propagation, etc. + // use_custom_launch_template = false + // disk_size = 50 + // + // # Remote access cannot be specified with a launch template + // remote_access = { + // ec2_ssh_key = module.key_pair.key_pair_name + // source_security_group_ids = [aws_security_group.remote_access.id] + // } min_size = 1 max_size = 10 @@ -53,14 +64,14 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules @@ -74,10 +85,9 @@ module "eks_managed_node_group" { |------|------| | [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | @@ -98,26 +108,24 @@ module "eks_managed_node_group" { | 
[cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service | `string` | `null` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no | | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no | | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no | | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no | -| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20` | `number` | `null` | no | +| [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20`. Only valid when `use_custom_launch_template` = `false` | `number` | `null` | no | | [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no | -| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `{}` | no | +| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `any` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | | [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template. 
Only valid when using a custom AMI via `ami_id` | `bool` | `false` | no | | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | | [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `{}` | no | | [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false` | `string` | `null` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | @@ -133,11 +141,13 @@ module "eks_managed_node_group" { | [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no | | [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default version of the launch template | `string` | `null` | no | | [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no | -| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `""` | no | +| [launch\_template\_id](#input\_launch\_template\_id) | The ID of an existing launch template to use. Required when `create_launch_template` = `false` and `use_custom_launch_template` = `true` | `string` | `""` | no | +| [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `null` | no | | [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no | | [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no | | [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. 
The default is `$Default` | `string` | `null` | no | -| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `{}` | no | +| [license\_specifications](#input\_license\_specifications) | A map of license specifications to associate with | `any` | `{}` | no | +| [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | `any` | `{}` | no | | [max\_size](#input\_max\_size) | Maximum number of instances/nodes | `number` | `3` | no | | [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` |
<pre>{<br>  "http_endpoint": "enabled",<br>  "http_put_response_hop_limit": 2,<br>  "http_tokens": "required"<br>}</pre>
| no | | [min\_size](#input\_min\_size) | Minimum number of instances/nodes | `number` | `0` | no | @@ -147,22 +157,18 @@ module "eks_managed_node_group" { | [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket` or `linux` based; `windows` is not supported | `string` | `"linux"` | no | | [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | | [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | +| [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | `map(string)` | `{}` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | -| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings | `any` | `{}` | no | -| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS managed node group security group"` | no | -| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no | -| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no | -| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no | -| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no | +| [remote\_access](#input\_remote\_access) | Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false` | `any` | `{}` | no | | [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | | [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | `any` | `{}` | no | | [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | `map(string)` | `{}` | no | -| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` | `{}` | no | +| [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` |
<pre>{<br>  "max_unavailable_percentage": 33<br>}</pre>
| no | | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no | +| [use\_custom\_launch\_template](#input\_use\_custom\_launch\_template) | Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | -| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no | | [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no | ## Outputs @@ -183,6 +189,4 @@ module "eks_managed_node_group" { | [node\_group\_resources](#output\_node\_group\_resources) | List of objects containing information about underlying resources | | [node\_group\_status](#output\_node\_group\_status) | Status of the EKS Node Group | | [node\_group\_taints](#output\_node\_group\_taints) | List of objects containing information about taints applied to the node group | -| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group | -| [security\_group\_id](#output\_security\_group\_id) | ID of the security group | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index f55630876e..06275f00a2 100644 --- a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -1,5 +1,4 @@ data "aws_partition" "current" {} - data "aws_caller_identity" "current" {} ################################################################################ @@ -30,75 +29,51 @@ module "user_data" { ################################################################################ locals { - # There are 4 scenarios here that have to be considered for `use_custom_launch_template`: - # 1. `var.create_launch_template = false && var.launch_template_name == ""` => EKS MNG will use its own default LT - # 2. `var.create_launch_template = false && var.launch_template_name == "something"` => User provided custom LT will be used - # 3. `var.create_launch_template = true && var.launch_template_name == ""` => Custom LT will be used, module will provide a default name - # 4. `var.create_launch_template = true && var.launch_template_name == "something"` => Custom LT will be used, LT name is provided by user - use_custom_launch_template = var.create_launch_template || var.launch_template_name != "" - - launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-eks-node-group") - - security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + launch_template_name = coalesce(var.launch_template_name, "${var.name}-eks-node-group") + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { - count = var.create && var.create_launch_template ? 1 : 0 - - name = var.launch_template_use_name_prefix ? 
null : local.launch_template_name_int - name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name_int}-" : null - description = var.launch_template_description - - ebs_optimized = var.ebs_optimized - image_id = var.ami_id - # # Set on node group instead - # instance_type = var.launch_template_instance_type - key_name = var.key_name - user_data = module.user_data.user_data - - vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids - - default_version = var.launch_template_default_version - update_default_version = var.update_launch_template_default_version - disable_api_termination = var.disable_api_termination - # Set on EKS managed node group, will fail if set here - # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics - # instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior - kernel_id = var.kernel_id - ram_disk_id = var.ram_disk_id + count = var.create && var.create_launch_template && var.use_custom_launch_template ? 1 : 0 dynamic "block_device_mappings" { for_each = var.block_device_mappings + content { - device_name = block_device_mappings.value.device_name - no_device = lookup(block_device_mappings.value, "no_device", null) - virtual_name = lookup(block_device_mappings.value, "virtual_name", null) + device_name = try(block_device_mappings.value.device_name, null) dynamic "ebs" { - for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) + for_each = try([block_device_mappings.value.ebs], []) + content { - delete_on_termination = lookup(ebs.value, "delete_on_termination", null) - encrypted = lookup(ebs.value, "encrypted", null) - kms_key_id = lookup(ebs.value, "kms_key_id", null) - iops = lookup(ebs.value, "iops", null) - throughput = lookup(ebs.value, "throughput", null) - snapshot_id = lookup(ebs.value, "snapshot_id", null) - volume_size = lookup(ebs.value, "volume_size", null) - volume_type = lookup(ebs.value, "volume_type", null) + delete_on_termination = try(ebs.value.delete_on_termination, null) + encrypted = try(ebs.value.encrypted, null) + iops = try(ebs.value.iops, null) + kms_key_id = try(ebs.value.kms_key_id, null) + snapshot_id = try(ebs.value.snapshot_id, null) + throughput = try(ebs.value.throughput, null) + volume_size = try(ebs.value.volume_size, null) + volume_type = try(ebs.value.volume_type, null) } } + + no_device = try(block_device_mappings.value.no_device, null) + virtual_name = try(block_device_mappings.value.virtual_name, null) } } dynamic "capacity_reservation_specification" { for_each = length(var.capacity_reservation_specification) > 0 ? 
[var.capacity_reservation_specification] : [] + content { - capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) + capacity_reservation_preference = try(capacity_reservation_specification.value.capacity_reservation_preference, null) dynamic "capacity_reservation_target" { for_each = try([capacity_reservation_specification.value.capacity_reservation_target], []) + content { - capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) + capacity_reservation_id = try(capacity_reservation_target.value.capacity_reservation_id, null) + capacity_reservation_resource_group_arn = try(capacity_reservation_target.value.capacity_reservation_resource_group_arn, null) } } } @@ -106,21 +81,29 @@ resource "aws_launch_template" "this" { dynamic "cpu_options" { for_each = length(var.cpu_options) > 0 ? [var.cpu_options] : [] + content { - core_count = cpu_options.value.core_count - threads_per_core = cpu_options.value.threads_per_core + core_count = try(cpu_options.value.core_count, null) + threads_per_core = try(cpu_options.value.threads_per_core, null) } } dynamic "credit_specification" { for_each = length(var.credit_specification) > 0 ? [var.credit_specification] : [] + content { - cpu_credits = credit_specification.value.cpu_credits + cpu_credits = try(credit_specification.value.cpu_credits, null) } } + default_version = var.launch_template_default_version + description = var.launch_template_description + disable_api_termination = var.disable_api_termination + ebs_optimized = var.ebs_optimized + dynamic "elastic_gpu_specifications" { - for_each = length(var.elastic_gpu_specifications) > 0 ? [var.elastic_gpu_specifications] : [] + for_each = var.elastic_gpu_specifications + content { type = elastic_gpu_specifications.value.type } @@ -128,6 +111,7 @@ resource "aws_launch_template" "this" { dynamic "elastic_inference_accelerator" { for_each = length(var.elastic_inference_accelerator) > 0 ? [var.elastic_inference_accelerator] : [] + content { type = elastic_inference_accelerator.value.type } @@ -135,6 +119,7 @@ resource "aws_launch_template" "this" { dynamic "enclave_options" { for_each = length(var.enclave_options) > 0 ? [var.enclave_options] : [] + content { enabled = enclave_options.value.enabled } @@ -143,7 +128,8 @@ resource "aws_launch_template" "this" { # Set on EKS managed node group, will fail if set here # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics # dynamic "hibernation_options" { - # for_each = var.hibernation_options != null ? [var.hibernation_options] : [] + # for_each = length(var.hibernation_options) > 0 ? [var.hibernation_options] : [] + # content { # configured = hibernation_options.value.configured # } @@ -159,104 +145,154 @@ resource "aws_launch_template" "this" { # } # } + image_id = var.ami_id + # Set on EKS managed node group, will fail if set here + # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics + # instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior + dynamic "instance_market_options" { for_each = length(var.instance_market_options) > 0 ? [var.instance_market_options] : [] + content { - market_type = instance_market_options.value.market_type + market_type = try(instance_market_options.value.market_type, null) dynamic "spot_options" { - for_each = length(lookup(instance_market_options.value, "spot_options", {})) > 0 ? 
[instance_market_options.value.spot_options] : [] + for_each = try([instance_market_options.value.spot_options], []) + content { - block_duration_minutes = lookup(spot_options.value, "block_duration_minutes", null) - instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null) - max_price = lookup(spot_options.value, "max_price", null) - spot_instance_type = lookup(spot_options.value, "spot_instance_type", null) - valid_until = lookup(spot_options.value, "valid_until", null) + block_duration_minutes = try(spot_options.value.block_duration_minutes, null) + instance_interruption_behavior = try(spot_options.value.instance_interruption_behavior, null) + max_price = try(spot_options.value.max_price, null) + spot_instance_type = try(spot_options.value.spot_instance_type, null) + valid_until = try(spot_options.value.valid_until, null) } } } } + # # Set on node group instead + # instance_type = var.launch_template_instance_type + kernel_id = var.kernel_id + key_name = var.key_name + dynamic "license_specification" { - for_each = length(var.license_specifications) > 0 ? [var.license_specifications] : [] + for_each = length(var.license_specifications) > 0 ? var.license_specifications : {} + content { license_configuration_arn = license_specifications.value.license_configuration_arn } } + dynamic "maintenance_options" { + for_each = length(var.maintenance_options) > 0 ? [var.maintenance_options] : [] + + content { + auto_recovery = try(maintenance_options.value.auto_recovery, null) + } + } + dynamic "metadata_options" { for_each = length(var.metadata_options) > 0 ? [var.metadata_options] : [] + content { - http_endpoint = lookup(metadata_options.value, "http_endpoint", null) - http_tokens = lookup(metadata_options.value, "http_tokens", null) - http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) - http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) - instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) + http_endpoint = try(metadata_options.value.http_endpoint, null) + http_protocol_ipv6 = try(metadata_options.value.http_protocol_ipv6, null) + http_put_response_hop_limit = try(metadata_options.value.http_put_response_hop_limit, null) + http_tokens = try(metadata_options.value.http_tokens, null) + instance_metadata_tags = try(metadata_options.value.instance_metadata_tags, null) } } dynamic "monitoring" { - for_each = var.enable_monitoring != null ? [1] : [] + for_each = var.enable_monitoring ? [1] : [] + content { enabled = var.enable_monitoring } } + name = var.launch_template_use_name_prefix ? null : local.launch_template_name + name_prefix = var.launch_template_use_name_prefix ? 
"${local.launch_template_name}-" : null + dynamic "network_interfaces" { for_each = var.network_interfaces content { - associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) - associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) - delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) - description = lookup(network_interfaces.value, "description", null) - device_index = lookup(network_interfaces.value, "device_index", null) - interface_type = lookup(network_interfaces.value, "interface_type", null) + associate_carrier_ip_address = try(network_interfaces.value.associate_carrier_ip_address, null) + associate_public_ip_address = try(network_interfaces.value.associate_public_ip_address, null) + delete_on_termination = try(network_interfaces.value.delete_on_termination, null) + description = try(network_interfaces.value.description, null) + device_index = try(network_interfaces.value.device_index, null) + interface_type = try(network_interfaces.value.interface_type, null) + ipv4_address_count = try(network_interfaces.value.ipv4_address_count, null) ipv4_addresses = try(network_interfaces.value.ipv4_addresses, []) - ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) + ipv4_prefix_count = try(network_interfaces.value.ipv4_prefix_count, null) + ipv4_prefixes = try(network_interfaces.value.ipv4_prefixes, null) + ipv6_address_count = try(network_interfaces.value.ipv6_address_count, null) ipv6_addresses = try(network_interfaces.value.ipv6_addresses, []) - ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) - network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) - private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) - security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) + ipv6_prefix_count = try(network_interfaces.value.ipv6_prefix_count, null) + ipv6_prefixes = try(network_interfaces.value.ipv6_prefixes, []) + network_card_index = try(network_interfaces.value.network_card_index, null) + network_interface_id = try(network_interfaces.value.network_interface_id, null) + private_ip_address = try(network_interfaces.value.private_ip_address, null) + # Ref: https://github.com/hashicorp/terraform-provider-aws/issues/4570 + security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) # Set on EKS managed node group, will fail if set here # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics - # subnet_id = lookup(network_interfaces.value, "subnet_id", null) + # subnet_id = try(network_interfaces.value.subnet_id, null) } } dynamic "placement" { for_each = length(var.placement) > 0 ? [var.placement] : [] + + content { + affinity = try(placement.value.affinity, null) + availability_zone = try(placement.value.availability_zone, null) + group_name = try(placement.value.group_name, null) + host_id = try(placement.value.host_id, null) + host_resource_group_arn = try(placement.value.host_resource_group_arn, null) + partition_number = try(placement.value.partition_number, null) + spread_domain = try(placement.value.spread_domain, null) + tenancy = try(placement.value.tenancy, null) + } + } + + dynamic "private_dns_name_options" { + for_each = length(var.private_dns_name_options) > 0 ? 
[var.private_dns_name_options] : [] + content { - affinity = lookup(placement.value, "affinity", null) - availability_zone = lookup(placement.value, "availability_zone", null) - group_name = lookup(placement.value, "group_name", null) - host_id = lookup(placement.value, "host_id", null) - spread_domain = lookup(placement.value, "spread_domain", null) - tenancy = lookup(placement.value, "tenancy", null) - partition_number = lookup(placement.value, "partition_number", null) + enable_resource_name_dns_aaaa_record = try(private_dns_name_options.value.enable_resource_name_dns_aaaa_record, null) + enable_resource_name_dns_a_record = try(private_dns_name_options.value.enable_resource_name_dns_a_record, null) + hostname_type = try(private_dns_name_options.value.hostname_type, null) } } + ram_disk_id = var.ram_disk_id + dynamic "tag_specifications" { for_each = toset(["instance", "volume", "network-interface"]) + content { resource_type = tag_specifications.key tags = merge(var.tags, { Name = var.name }, var.launch_template_tags) } } - lifecycle { - create_before_destroy = true - } + update_default_version = var.update_launch_template_default_version + user_data = module.user_data.user_data + vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids + + tags = var.tags - # Prevent premature access of security group roles and policies by pods that + # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes depends_on = [ - aws_security_group_rule.this, aws_iam_role_policy_attachment.this, ] - tags = var.tags + lifecycle { + create_before_destroy = true + } } ################################################################################ @@ -264,7 +300,7 @@ resource "aws_launch_template" "this" { ################################################################################ locals { - launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name, null) + launch_template_id = var.create && var.create_launch_template ? aws_launch_template.this[0].id : var.launch_template_id # Change order to allow users to set version priority before using defaults launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default")) } @@ -293,21 +329,23 @@ resource "aws_eks_node_group" "this" { version = var.ami_id != "" ? null : var.cluster_version capacity_type = var.capacity_type - disk_size = local.use_custom_launch_template ? null : var.disk_size # if using LT, set disk size on LT or else it will error here + disk_size = var.use_custom_launch_template ? null : var.disk_size # if using a custom LT, set disk size on custom LT or else it will error here force_update_version = var.force_update_version instance_types = var.instance_types labels = var.labels dynamic "launch_template" { - for_each = local.use_custom_launch_template ? [1] : [] + for_each = var.use_custom_launch_template ? [1] : [] + content { - name = local.launch_template_name + id = local.launch_template_id version = local.launch_template_version } } dynamic "remote_access" { for_each = length(var.remote_access) > 0 ? 
[var.remote_access] : [] + content { ec2_ssh_key = try(remote_access.value.ec2_ssh_key, null) source_security_group_ids = try(remote_access.value.source_security_group_ids, []) @@ -316,15 +354,17 @@ resource "aws_eks_node_group" "this" { dynamic "taint" { for_each = var.taints + content { key = taint.value.key - value = lookup(taint.value, "value") + value = try(taint.value.value, null) effect = taint.value.effect } } dynamic "update_config" { for_each = length(var.update_config) > 0 ? [var.update_config] : [] + content { max_unavailable_percentage = try(update_config.value.max_unavailable_percentage, null) max_unavailable = try(update_config.value.max_unavailable, null) @@ -350,68 +390,14 @@ resource "aws_eks_node_group" "this" { ) } -################################################################################ -# Security Group -################################################################################ - -locals { - security_group_name = coalesce(var.security_group_name, "${var.name}-eks-node-group") - create_security_group = var.create && var.create_security_group -} - -resource "aws_security_group" "this" { - count = local.create_security_group ? 1 : 0 - - name = var.security_group_use_name_prefix ? null : local.security_group_name - name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}-" : null - description = var.security_group_description - vpc_id = var.vpc_id - - tags = merge( - var.tags, - { "Name" = local.security_group_name }, - var.security_group_tags - ) - - # https://github.com/hashicorp/terraform-provider-aws/issues/2445 - # https://github.com/hashicorp/terraform-provider-aws/issues/9692 - lifecycle { - create_before_destroy = true - } -} - -resource "aws_security_group_rule" "this" { - for_each = { for k, v in var.security_group_rules : k => v if local.create_security_group } - - # Required - security_group_id = aws_security_group.this[0].id - protocol = each.value.protocol - from_port = each.value.from_port - to_port = each.value.to_port - type = each.value.type - - # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? var.cluster_security_group_id : null - ) -} - ################################################################################ # IAM Role ################################################################################ locals { - iam_role_name = coalesce(var.iam_role_name, "${var.name}-eks-node-group") - + iam_role_name = coalesce(var.iam_role_name, "${var.name}-eks-node-group") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + cni_policy = var.cluster_ip_family == "ipv6" ? 
"arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } data "aws_iam_policy_document" "assume_role_policy" { @@ -445,11 +431,18 @@ resource "aws_iam_role" "this" { # Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group resource "aws_iam_role_policy_attachment" "this" { - for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([ + for_each = { for k, v in toset(compact([ "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", var.iam_role_attach_cni_policy ? local.cni_policy : "", - ], var.iam_role_additional_policies)))) : toset([]) + ])) : k => v if var.create && var.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf index 9d7535332f..58dbe9646f 100644 --- a/modules/eks-managed-node-group/outputs.tf +++ b/modules/eks-managed-node-group/outputs.tf @@ -4,22 +4,22 @@ output "launch_template_id" { description = "The ID of the launch template" - value = try(aws_launch_template.this[0].id, "") + value = try(aws_launch_template.this[0].id, null) } output "launch_template_arn" { description = "The ARN of the launch template" - value = try(aws_launch_template.this[0].arn, "") + value = try(aws_launch_template.this[0].arn, null) } output "launch_template_latest_version" { description = "The latest version of the launch template" - value = try(aws_launch_template.this[0].latest_version, "") + value = try(aws_launch_template.this[0].latest_version, null) } output "launch_template_name" { description = "The name of the launch template" - value = try(aws_launch_template.this[0].name, "") + value = try(aws_launch_template.this[0].name, null) } ################################################################################ @@ -28,17 +28,17 @@ output "launch_template_name" { output "node_group_arn" { description = "Amazon Resource Name (ARN) of the EKS Node Group" - value = try(aws_eks_node_group.this[0].arn, "") + value = try(aws_eks_node_group.this[0].arn, null) } output "node_group_id" { description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)" - value = try(aws_eks_node_group.this[0].id, "") + value = try(aws_eks_node_group.this[0].id, null) } output "node_group_resources" { description = "List of objects containing information about underlying resources" - value = try(aws_eks_node_group.this[0].resources, "") + value = try(aws_eks_node_group.this[0].resources, null) } output "node_group_autoscaling_group_names" { @@ -48,7 +48,7 @@ output "node_group_autoscaling_group_names" { output "node_group_status" { description = "Status of the EKS Node Group" - value = try(aws_eks_node_group.this[0].arn, "") + value = try(aws_eks_node_group.this[0].arn, null) } output "node_group_labels" { @@ -61,27 +61,13 @@ output "node_group_taints" { value = try(aws_eks_node_group.this[0].taint, []) } -################################################################################ -# Security Group 
-################################################################################ - -output "security_group_arn" { - description = "Amazon Resource Name (ARN) of the security group" - value = try(aws_security_group.this[0].arn, "") -} - -output "security_group_id" { - description = "ID of the security group" - value = try(aws_security_group.this[0].id, "") -} - ################################################################################ # IAM Role ################################################################################ output "iam_role_name" { description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "iam_role_arn" { @@ -91,5 +77,5 @@ output "iam_role_arn" { output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 0000827719..d2790e1acb 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -84,12 +84,24 @@ variable "create_launch_template" { default = true } -variable "launch_template_name" { - description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)" +variable "use_custom_launch_template" { + description = "Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template" + type = bool + default = true +} + +variable "launch_template_id" { + description = "The ID of an existing launch template to use. Required when `create_launch_template` = `false` and `use_custom_launch_template` = `true`" type = string default = "" } +variable "launch_template_name" { + description = "Name of launch template to be created" + type = string + default = null +} + variable "launch_template_use_name_prefix" { description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix" type = bool @@ -188,7 +200,7 @@ variable "credit_specification" { variable "elastic_gpu_specifications" { description = "The elastic GPU to attach to the instance" - type = map(string) + type = any default = {} } @@ -210,9 +222,15 @@ variable "instance_market_options" { default = {} } +variable "maintenance_options" { + description = "The maintenance options for the instance" + type = any + default = {} +} + variable "license_specifications" { - description = "A list of license specifications to associate with" - type = map(string) + description = "A map of license specifications to associate with" + type = any default = {} } @@ -244,6 +262,12 @@ variable "placement" { default = {} } +variable "private_dns_name_options" { + description = "The options for the instance hostname. The default values are inherited from the subnet" + type = map(string) + default = {} +} + variable "launch_template_tags" { description = "A map of additional tags to add to the tag_specifications of launch template created" type = map(string) @@ -309,7 +333,7 @@ variable "capacity_type" { } variable "disk_size" { - description = "Disk size in GiB for nodes. Defaults to `20`" + description = "Disk size in GiB for nodes. Defaults to `20`. 
Only valid when `use_custom_launch_template` = `false`" type = number default = null } @@ -345,7 +369,7 @@ variable "launch_template_version" { } variable "remote_access" { - description = "Configuration block with remote access settings" + description = "Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false`" type = any default = {} } @@ -359,7 +383,9 @@ variable "taints" { variable "update_config" { description = "Configuration block of settings for max unavailable resources during node group updates" type = map(string) - default = {} + default = { + max_unavailable_percentage = 33 + } } variable "timeouts" { @@ -368,58 +394,6 @@ variable "timeouts" { default = {} } -################################################################################ -# Security Group -################################################################################ - -variable "create_security_group" { - description = "Determines whether to create a security group" - type = bool - default = true -} - -variable "security_group_name" { - description = "Name to use on security group created" - type = string - default = null -} - -variable "security_group_use_name_prefix" { - description = "Determines whether the security group name (`security_group_name`) is used as a prefix" - type = bool - default = true -} - -variable "security_group_description" { - description = "Description for the security group created" - type = string - default = "EKS managed node group security group" -} - -variable "vpc_id" { - description = "ID of the VPC where the security group/nodes will be provisioned" - type = string - default = null -} - -variable "security_group_rules" { - description = "List of security group rules to add to the security group created" - type = any - default = {} -} - -variable "cluster_security_group_id" { - description = "Cluster control plane security group ID" - type = string - default = null -} - -variable "security_group_tags" { - description = "A map of additional tags to add to the security group created" - type = map(string) - default = {} -} - ################################################################################ # IAM Role ################################################################################ @@ -480,8 +454,8 @@ variable "iam_role_attach_cni_policy" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } variable "iam_role_tags" { diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf index 22e8d7265f..325eee94e1 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index 97dd0d3e53..4ed9a6fd2d 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -8,8 +8,8 @@ Configuration in this directory creates a Fargate EKS Profile module "fargate_profile" { source = "terraform-aws-modules/eks/aws//modules/fargate-profile" - name = "separate-fargate-profile" - cluster_name = "my-cluster" + name = "separate-fargate-profile" + cluster_name = "my-cluster" subnet_ids = ["subnet-abcde012", "subnet-bcde012a", 
"subnet-fghi345a"] selectors = [{ @@ -28,14 +28,14 @@ module "fargate_profile" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules @@ -47,6 +47,7 @@ No modules. |------|------| | [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | @@ -60,7 +61,7 @@ No modules. | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no | | [create](#input\_create) | Determines whether to create Fargate profile or not | `bool` | `true` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | +| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the Fargate profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf index 554b0e8814..de9dd2d754 100644 --- a/modules/fargate-profile/main.tf +++ b/modules/fargate-profile/main.tf @@ -1,13 +1,10 @@ data "aws_partition" "current" {} - data "aws_caller_identity" "current" {} locals { - iam_role_name = coalesce(var.iam_role_name, var.name, "fargate-profile") - + iam_role_name = coalesce(var.iam_role_name, var.name, "fargate-profile") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + cni_policy = var.cluster_ip_family == "ipv6" ? 
"arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } ################################################################################ @@ -44,10 +41,17 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([ + for_each = { for k, v in toset(compact([ "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy", var.iam_role_attach_cni_policy ? local.cni_policy : "", - ], var.iam_role_additional_policies)))) : toset([]) + ])) : k => v if var.create && var.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/fargate-profile/outputs.tf b/modules/fargate-profile/outputs.tf index c8b663e80e..96763bfb1f 100644 --- a/modules/fargate-profile/outputs.tf +++ b/modules/fargate-profile/outputs.tf @@ -4,7 +4,7 @@ output "iam_role_name" { description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "iam_role_arn" { @@ -14,7 +14,7 @@ output "iam_role_arn" { output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } ################################################################################ @@ -23,20 +23,20 @@ output "iam_role_unique_id" { output "fargate_profile_arn" { description = "Amazon Resource Name (ARN) of the EKS Fargate Profile" - value = try(aws_eks_fargate_profile.this[0].arn, "") + value = try(aws_eks_fargate_profile.this[0].arn, null) } output "fargate_profile_id" { description = "EKS Cluster name and EKS Fargate Profile name separated by a colon (`:`)" - value = try(aws_eks_fargate_profile.this[0].id, "") + value = try(aws_eks_fargate_profile.this[0].id, null) } output "fargate_profile_status" { description = "Status of the EKS Fargate Profile" - value = try(aws_eks_fargate_profile.this[0].status, "") + value = try(aws_eks_fargate_profile.this[0].status, null) } output "fargate_profile_pod_execution_role_arn" { description = "Amazon Resource Name (ARN) of the EKS Fargate Profile Pod execution role ARN" - value = try(aws_eks_fargate_profile.this[0].pod_execution_role_arn, "") + value = try(aws_eks_fargate_profile.this[0].pod_execution_role_arn, null) } diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf index 4d5e95ccbe..e22279dc6b 100644 --- a/modules/fargate-profile/variables.tf +++ b/modules/fargate-profile/variables.tf @@ -70,8 +70,8 @@ variable "iam_role_attach_cni_policy" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } variable "iam_role_tags" { diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index 22e8d7265f..325eee94e1 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = 
{
      source  = "hashicorp/aws"
-      version = ">= 3.72"
+      version = ">= 4.45"
    }
  }
}
diff --git a/modules/karpenter/README.md b/modules/karpenter/README.md
index 1797c6d2e8..71f4ff9455 100644
--- a/modules/karpenter/README.md
+++ b/modules/karpenter/README.md
@@ -99,14 +99,14 @@ module "karpenter" {
| Name | Version |
|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.13.1 |
-| [aws](#requirement\_aws) | >= 3.72 |
+| [terraform](#requirement\_terraform) | >= 1.0 |
+| [aws](#requirement\_aws) | >= 4.45 |
## Providers
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | >= 3.72 |
+| [aws](#provider\_aws) | >= 4.45 |
## Modules
diff --git a/modules/karpenter/main.tf b/modules/karpenter/main.tf
index d54fa9585a..62640f0550 100644
--- a/modules/karpenter/main.tf
+++ b/modules/karpenter/main.tf
@@ -261,11 +261,14 @@ locals {
resource "aws_cloudwatch_event_rule" "this" {
  for_each = { for k, v in local.events : k => v if local.enable_spot_termination }
-  name          = "Karpenter${each.value.name}-${var.cluster_name}"
+  name_prefix   = "Karpenter${each.value.name}-"
  description   = each.value.description
  event_pattern = jsonencode(each.value.event_pattern)
-  tags = var.tags
+  tags = merge(
+    { "ClusterName" : var.cluster_name },
+    var.tags,
+  )
}
resource "aws_cloudwatch_event_target" "this" {
diff --git a/modules/karpenter/versions.tf b/modules/karpenter/versions.tf
index 22e8d7265f..325eee94e1 100644
--- a/modules/karpenter/versions.tf
+++ b/modules/karpenter/versions.tf
@@ -1,10 +1,10 @@
terraform {
-  required_version = ">= 0.13.1"
+  required_version = ">= 1.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
-      version = ">= 3.72"
+      version = ">= 4.45"
    }
  }
}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 4c392362f6..051aa4f4eb 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -16,8 +16,11 @@ module "self_managed_node_group" {
  vpc_id     = "vpc-1234556abcdef"
  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+  // The following variables are necessary if you decide to use the module outside of the parent EKS module context.
+  // Without them, the node security groups will be empty and the nodes will not be able to join the cluster.
vpc_security_group_ids = [ - # cluster_security_group_id, + module.eks.cluster_primary_security_group_id, + module.eks.cluster_security_group_id, ] min_size = 1 @@ -39,14 +42,14 @@ module "self_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.1 | -| [aws](#requirement\_aws) | >= 3.72 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.45 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.72 | +| [aws](#provider\_aws) | >= 4.45 | ## Modules @@ -62,13 +65,11 @@ module "self_managed_node_group" { | [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource | | [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [aws_security_group_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_default_tags.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | @@ -88,32 +89,33 @@ module "self_managed_node_group" { | [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `""` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). 
This is the security group that is automatically created by the EKS service | `string` | `null` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Cluster control plane security group ID | `string` | `null` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no | +| [context](#input\_context) | Reserved | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no | | [create\_autoscaling\_group](#input\_create\_autoscaling\_group) | Determines whether to create autoscaling group or not | `bool` | `true` | no | | [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no | | [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Determines whether to create a security group | `bool` | `true` | no | | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `null` | no | +| [default\_instance\_warmup](#input\_default\_instance\_warmup) | Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data | `number` | `null` | no | | [delete\_timeout](#input\_delete\_timeout) | Delete timeout to wait for destroying autoscaling group | `string` | `null` | no | | [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no | | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no | | [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no | -| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `map(string)` | `{}` | no | +| [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `any` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | | [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. 
The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `[]` | no |
| [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `{}` | no |
| [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no |
+| [force\_delete\_warm\_pool](#input\_force\_delete\_warm\_pool) | Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate | `bool` | `null` | no |
| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
| [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
| [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `{}` | no |
| [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no |
-| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
+| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM role. WARNING: If set to `false`, the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
@@ -124,17 +126,20 @@
| [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no |
| [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
| [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `{}` | no |
-| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | `{}` | no |
+| [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` | <pre>{<br>  "preferences": {<br>    "min_healthy_percentage": 66<br>  },<br>  "strategy": "Rolling"<br>}</pre> | no |
+| [instance\_requirements](#input\_instance\_requirements) | The attribute requirements for the type of instance. If present then `instance_type` cannot be present | `any` | `{}` | no |
| [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `""` | no |
| [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
| [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
| [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default Version of the launch template | `string` | `null` | no |
| [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
-| [launch\_template\_name](#input\_launch\_template\_name) | Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`) | `string` | `null` | no |
+| [launch\_template\_id](#input\_launch\_template\_id) | The ID of an existing launch template to use. Required when `create_launch_template` = `false` | `string` | `""` | no |
+| [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `null` | no |
| [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no |
| [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
-| [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | `map(string)` | `{}` | no |
+| [license\_specifications](#input\_license\_specifications) | A map of license specifications to associate with | `any` | `{}` | no |
+| [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | `any` | `{}` | no |
| [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds | `number` | `null` | no |
| [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `3` | no |
| [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | `map(string)` | <pre>{<br>  "http_endpoint": "enabled",<br>  "http_put_response_hop_limit": 2,<br>  "http_tokens": "required"<br>}</pre> | no |
@@ -149,14 +154,10 @@
| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
+| [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | `map(string)` | `{}` | no |
| [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no |
| [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
-| [security\_group\_description](#input\_security\_group\_description) | Description for the security group created | `string` | `"EKS self-managed node group security group"` | no |
-| [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
-| [security\_group\_rules](#input\_security\_group\_rules) | List of security group rules to add to the security group created | `any` | `{}` | no |
-| [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
-| [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no |
| [service\_linked\_role\_arn](#input\_service\_linked\_role\_arn) | The ARN of the service-linked role that the ASG will use to call other AWS services | `string` | `null` | no |
| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine the availability zones in which the group will reside. Conflicts with `availability_zones` | `list(string)` | `null` | no |
| [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `[]` | no |
@@ -164,11 +165,9 @@
| [target\_group\_arns](#input\_target\_group\_arns) | A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing | `list(string)` | `[]` | no |
| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. 
The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `[]` | no | | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update Default Version each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no | -| [use\_default\_tags](#input\_use\_default\_tags) | Enables/disables the use of provider default tags in the tag\_specifications of the Auto Scaling group | `bool` | `false` | no | | [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | -| [vpc\_id](#input\_vpc\_id) | ID of the VPC where the security group/nodes will be provisioned | `string` | `null` | no | | [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no | | [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior. | `string` | `null` | no | | [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior. | `number` | `null` | no | @@ -202,7 +201,5 @@ module "self_managed_node_group" { | [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template | | [launch\_template\_name](#output\_launch\_template\_name) | The name of the launch template | | [platform](#output\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | -| [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group | -| [security\_group\_id](#output\_security\_group\_id) | ID of the security group | | [user\_data](#output\_user\_data) | Base64 encoded user data | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index f74fb8189e..7632d24609 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -1,9 +1,6 @@ data "aws_partition" "current" {} - data "aws_caller_identity" "current" {} -data "aws_default_tags" "current" {} - data "aws_ami" "eks_default" { count = var.create ? 
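With the `use_default_tags` input and the `aws_default_tags` data source removed (see the deletions above), the tags propagated by the autoscaling group now come only from `var.tags` plus the cluster ownership tags. If your instances previously inherited provider `default_tags` through that option, one approach is to merge the same values into the module's `tags` input; a hypothetical sketch (`var.default_tags` is an illustrative variable, not a module input):

```hcl
module "self_managed_node_group" {
  source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"

  # ... other inputs ...

  # Provider-level default_tags are no longer merged in; pass any tags the
  # launched instances must carry explicitly
  tags = merge(var.default_tags, { Terraform = "true" })
}
```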
1 : 0 @@ -43,65 +40,51 @@ module "user_data" { ################################################################################ locals { - launch_template_name_int = coalesce(var.launch_template_name, "${var.name}-node-group") - - security_group_ids = compact(concat([try(aws_security_group.this[0].id, ""), var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + launch_template_name = coalesce(var.launch_template_name, "${var.name}-node-group") + security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) } resource "aws_launch_template" "this" { count = var.create && var.create_launch_template ? 1 : 0 - name = var.launch_template_use_name_prefix ? null : local.launch_template_name_int - name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name_int}-" : null - description = var.launch_template_description - - ebs_optimized = var.ebs_optimized - image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id) - instance_type = var.instance_type - key_name = var.key_name - user_data = module.user_data.user_data - - vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids - - default_version = var.launch_template_default_version - update_default_version = var.update_launch_template_default_version - disable_api_termination = var.disable_api_termination - instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior - kernel_id = var.kernel_id - ram_disk_id = var.ram_disk_id - dynamic "block_device_mappings" { for_each = var.block_device_mappings + content { - device_name = block_device_mappings.value.device_name - no_device = lookup(block_device_mappings.value, "no_device", null) - virtual_name = lookup(block_device_mappings.value, "virtual_name", null) + device_name = try(block_device_mappings.value.device_name, null) dynamic "ebs" { - for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) + for_each = try([block_device_mappings.value.ebs], []) + content { - delete_on_termination = lookup(ebs.value, "delete_on_termination", null) - encrypted = lookup(ebs.value, "encrypted", null) - kms_key_id = lookup(ebs.value, "kms_key_id", null) - iops = lookup(ebs.value, "iops", null) - throughput = lookup(ebs.value, "throughput", null) - snapshot_id = lookup(ebs.value, "snapshot_id", null) - volume_size = lookup(ebs.value, "volume_size", null) - volume_type = lookup(ebs.value, "volume_type", null) + delete_on_termination = try(ebs.value.delete_on_termination, null) + encrypted = try(ebs.value.encrypted, null) + iops = try(ebs.value.iops, null) + kms_key_id = try(ebs.value.kms_key_id, null) + snapshot_id = try(ebs.value.snapshot_id, null) + throughput = try(ebs.value.throughput, null) + volume_size = try(ebs.value.volume_size, null) + volume_type = try(ebs.value.volume_type, null) } } + + no_device = try(block_device_mappings.value.no_device, null) + virtual_name = try(block_device_mappings.value.virtual_name, null) } } dynamic "capacity_reservation_specification" { for_each = length(var.capacity_reservation_specification) > 0 ? 
[var.capacity_reservation_specification] : [] + content { - capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) + capacity_reservation_preference = try(capacity_reservation_specification.value.capacity_reservation_preference, null) dynamic "capacity_reservation_target" { for_each = try([capacity_reservation_specification.value.capacity_reservation_target], []) + content { - capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) + capacity_reservation_id = try(capacity_reservation_target.value.capacity_reservation_id, null) + capacity_reservation_resource_group_arn = try(capacity_reservation_target.value.capacity_reservation_resource_group_arn, null) } } } @@ -109,21 +92,29 @@ resource "aws_launch_template" "this" { dynamic "cpu_options" { for_each = length(var.cpu_options) > 0 ? [var.cpu_options] : [] + content { - core_count = cpu_options.value.core_count - threads_per_core = cpu_options.value.threads_per_core + core_count = try(cpu_options.value.core_count, null) + threads_per_core = try(cpu_options.value.threads_per_core, null) } } dynamic "credit_specification" { for_each = length(var.credit_specification) > 0 ? [var.credit_specification] : [] + content { - cpu_credits = credit_specification.value.cpu_credits + cpu_credits = try(credit_specification.value.cpu_credits, null) } } + default_version = var.launch_template_default_version + description = var.launch_template_description + disable_api_termination = var.disable_api_termination + ebs_optimized = var.ebs_optimized + dynamic "elastic_gpu_specifications" { - for_each = length(var.elastic_gpu_specifications) > 0 ? [var.elastic_gpu_specifications] : [] + for_each = var.elastic_gpu_specifications + content { type = elastic_gpu_specifications.value.type } @@ -131,6 +122,7 @@ resource "aws_launch_template" "this" { dynamic "elastic_inference_accelerator" { for_each = length(var.elastic_inference_accelerator) > 0 ? [var.elastic_inference_accelerator] : [] + content { type = elastic_inference_accelerator.value.type } @@ -138,6 +130,7 @@ resource "aws_launch_template" "this" { dynamic "enclave_options" { for_each = length(var.enclave_options) > 0 ? [var.enclave_options] : [] + content { enabled = enclave_options.value.enabled } @@ -145,6 +138,7 @@ resource "aws_launch_template" "this" { dynamic "hibernation_options" { for_each = length(var.hibernation_options) > 0 ? [var.hibernation_options] : [] + content { configured = hibernation_options.value.configured } @@ -154,102 +148,245 @@ resource "aws_launch_template" "this" { arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn } + image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id) + instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior + dynamic "instance_market_options" { for_each = length(var.instance_market_options) > 0 ? [var.instance_market_options] : [] + content { - market_type = instance_market_options.value.market_type + market_type = try(instance_market_options.value.market_type, null) dynamic "spot_options" { - for_each = length(lookup(instance_market_options.value, "spot_options", {})) > 0 ? 
[instance_market_options.value.spot_options] : [] + for_each = try([instance_market_options.value.spot_options], []) + + content { + block_duration_minutes = try(spot_options.value.block_duration_minutes, null) + instance_interruption_behavior = try(spot_options.value.instance_interruption_behavior, null) + max_price = try(spot_options.value.max_price, null) + spot_instance_type = try(spot_options.value.spot_instance_type, null) + valid_until = try(spot_options.value.valid_until, null) + } + } + } + } + + dynamic "instance_requirements" { + for_each = length(var.instance_requirements) > 0 ? [var.instance_requirements] : [] + + content { + + dynamic "accelerator_count" { + for_each = try([instance_requirements.value.accelerator_count], []) + + content { + max = try(accelerator_count.value.max, null) + min = try(accelerator_count.value.min, null) + } + } + + accelerator_manufacturers = try(instance_requirements.value.accelerator_manufacturers, []) + accelerator_names = try(instance_requirements.value.accelerator_names, []) + + dynamic "accelerator_total_memory_mib" { + for_each = try([instance_requirements.value.accelerator_total_memory_mib], []) + + content { + max = try(accelerator_total_memory_mib.value.max, null) + min = try(accelerator_total_memory_mib.value.min, null) + } + } + + accelerator_types = try(instance_requirements.value.accelerator_types, []) + bare_metal = try(instance_requirements.value.bare_metal, null) + + dynamic "baseline_ebs_bandwidth_mbps" { + for_each = try([instance_requirements.value.baseline_ebs_bandwidth_mbps], []) + + content { + max = try(baseline_ebs_bandwidth_mbps.value.max, null) + min = try(baseline_ebs_bandwidth_mbps.value.min, null) + } + } + + burstable_performance = try(instance_requirements.value.burstable_performance, null) + cpu_manufacturers = try(instance_requirements.value.cpu_manufacturers, []) + excluded_instance_types = try(instance_requirements.value.excluded_instance_types, []) + instance_generations = try(instance_requirements.value.instance_generations, []) + local_storage = try(instance_requirements.value.local_storage, null) + local_storage_types = try(instance_requirements.value.local_storage_types, []) + + dynamic "memory_gib_per_vcpu" { + for_each = try([instance_requirements.value.memory_gib_per_vcpu], []) + + content { + max = try(memory_gib_per_vcpu.value.max, null) + min = try(memory_gib_per_vcpu.value.min, null) + } + } + + dynamic "memory_mib" { + for_each = [instance_requirements.value.memory_mib] + + content { + max = try(memory_mib.value.max, null) + min = memory_mib.value.min + } + } + + dynamic "network_interface_count" { + for_each = try([instance_requirements.value.network_interface_count], []) + content { - block_duration_minutes = lookup(spot_options.value, "block_duration_minutes", null) - instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null) - max_price = lookup(spot_options.value, "max_price", null) - spot_instance_type = lookup(spot_options.value, "spot_instance_type", null) - valid_until = lookup(spot_options.value, "valid_until", null) + max = try(network_interface_count.value.max, null) + min = try(network_interface_count.value.min, null) + } + } + + on_demand_max_price_percentage_over_lowest_price = try(instance_requirements.value.on_demand_max_price_percentage_over_lowest_price, null) + require_hibernate_support = try(instance_requirements.value.require_hibernate_support, null) + spot_max_price_percentage_over_lowest_price = 
try(instance_requirements.value.spot_max_price_percentage_over_lowest_price, null) + + dynamic "total_local_storage_gb" { + for_each = try([instance_requirements.value.total_local_storage_gb], []) + + content { + max = try(total_local_storage_gb.value.max, null) + min = try(total_local_storage_gb.value.min, null) + } + } + + dynamic "vcpu_count" { + for_each = [instance_requirements.value.vcpu_count] + + content { + max = try(vcpu_count.value.max, null) + min = vcpu_count.value.min } } } } + instance_type = var.instance_type + kernel_id = var.kernel_id + key_name = var.key_name + dynamic "license_specification" { - for_each = length(var.license_specifications) > 0 ? [var.license_specifications] : [] + for_each = length(var.license_specifications) > 0 ? var.license_specifications : {} + content { license_configuration_arn = license_specifications.value.license_configuration_arn } } + dynamic "maintenance_options" { + for_each = length(var.maintenance_options) > 0 ? [var.maintenance_options] : [] + + content { + auto_recovery = try(maintenance_options.value.auto_recovery, null) + } + } + dynamic "metadata_options" { for_each = length(var.metadata_options) > 0 ? [var.metadata_options] : [] + content { - http_endpoint = lookup(metadata_options.value, "http_endpoint", null) - http_tokens = lookup(metadata_options.value, "http_tokens", null) - http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) - http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) - instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) + http_endpoint = try(metadata_options.value.http_endpoint, null) + http_protocol_ipv6 = try(metadata_options.value.http_protocol_ipv6, null) + http_put_response_hop_limit = try(metadata_options.value.http_put_response_hop_limit, null) + http_tokens = try(metadata_options.value.http_tokens, null) + instance_metadata_tags = try(metadata_options.value.instance_metadata_tags, null) } } dynamic "monitoring" { - for_each = var.enable_monitoring != null ? [1] : [] + for_each = var.enable_monitoring ? [1] : [] + content { enabled = var.enable_monitoring } } + name = var.launch_template_use_name_prefix ? null : local.launch_template_name + name_prefix = var.launch_template_use_name_prefix ? 
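The `instance_requirements` handling above follows the provider schema closely; note that the `memory_mib` and `vcpu_count` blocks are iterated without a `try(...)` guard, so both blocks, including their `min` values, are effectively required whenever `instance_requirements` is set, and `instance_type` must then be omitted per the variable description. A hypothetical input:

```hcl
instance_requirements = {
  # Required: attribute-based selection must bound vCPU and memory
  vcpu_count = {
    min = 2
    max = 8
  }
  memory_mib = {
    min = 4096
  }

  # Optional narrowing
  cpu_manufacturers     = ["amazon-web-services", "intel"]
  instance_generations  = ["current"]
  burstable_performance = "excluded"
}
```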
"${local.launch_template_name}-" : null + dynamic "network_interfaces" { for_each = var.network_interfaces content { - associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) - associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) - delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) - description = lookup(network_interfaces.value, "description", null) - device_index = lookup(network_interfaces.value, "device_index", null) - interface_type = lookup(network_interfaces.value, "interface_type", null) + associate_carrier_ip_address = try(network_interfaces.value.associate_carrier_ip_address, null) + associate_public_ip_address = try(network_interfaces.value.associate_public_ip_address, null) + delete_on_termination = try(network_interfaces.value.delete_on_termination, null) + description = try(network_interfaces.value.description, null) + device_index = try(network_interfaces.value.device_index, null) + interface_type = try(network_interfaces.value.interface_type, null) + ipv4_address_count = try(network_interfaces.value.ipv4_address_count, null) ipv4_addresses = try(network_interfaces.value.ipv4_addresses, []) - ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) + ipv4_prefix_count = try(network_interfaces.value.ipv4_prefix_count, null) + ipv4_prefixes = try(network_interfaces.value.ipv4_prefixes, null) + ipv6_address_count = try(network_interfaces.value.ipv6_address_count, null) ipv6_addresses = try(network_interfaces.value.ipv6_addresses, []) - ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) - network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) - private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) - security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) - subnet_id = lookup(network_interfaces.value, "subnet_id", null) + ipv6_prefix_count = try(network_interfaces.value.ipv6_prefix_count, null) + ipv6_prefixes = try(network_interfaces.value.ipv6_prefixes, []) + network_card_index = try(network_interfaces.value.network_card_index, null) + network_interface_id = try(network_interfaces.value.network_interface_id, null) + private_ip_address = try(network_interfaces.value.private_ip_address, null) + # Ref: https://github.com/hashicorp/terraform-provider-aws/issues/4570 + security_groups = compact(concat(try(network_interfaces.value.security_groups, []), local.security_group_ids)) + subnet_id = try(network_interfaces.value.subnet_id, null) } } dynamic "placement" { for_each = length(var.placement) > 0 ? [var.placement] : [] + + content { + affinity = try(placement.value.affinity, null) + availability_zone = try(placement.value.availability_zone, null) + group_name = try(placement.value.group_name, null) + host_id = try(placement.value.host_id, null) + host_resource_group_arn = try(placement.value.host_resource_group_arn, null) + partition_number = try(placement.value.partition_number, null) + spread_domain = try(placement.value.spread_domain, null) + tenancy = try(placement.value.tenancy, null) + } + } + + dynamic "private_dns_name_options" { + for_each = length(var.private_dns_name_options) > 0 ? 
[var.private_dns_name_options] : [] + content { - affinity = lookup(placement.value, "affinity", null) - availability_zone = lookup(placement.value, "availability_zone", null) - group_name = lookup(placement.value, "group_name", null) - host_id = lookup(placement.value, "host_id", null) - spread_domain = lookup(placement.value, "spread_domain", null) - tenancy = lookup(placement.value, "tenancy", null) - partition_number = lookup(placement.value, "partition_number", null) + enable_resource_name_dns_aaaa_record = try(private_dns_name_options.value.enable_resource_name_dns_aaaa_record, null) + enable_resource_name_dns_a_record = try(private_dns_name_options.value.enable_resource_name_dns_a_record, null) + hostname_type = try(private_dns_name_options.value.hostname_type, null) } } + ram_disk_id = var.ram_disk_id + dynamic "tag_specifications" { for_each = toset(["instance", "volume", "network-interface"]) + content { resource_type = tag_specifications.key tags = merge(var.tags, { Name = var.name }, var.launch_template_tags) } } - lifecycle { - create_before_destroy = true - } + update_default_version = var.update_launch_template_default_version + user_data = module.user_data.user_data + vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids + + tags = var.tags - # Prevent premature access of security group roles and policies by pods that + # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes depends_on = [ - aws_security_group_rule.this, aws_iam_role_policy_attachment.this, ] - tags = var.tags + lifecycle { + create_before_destroy = true + } } ################################################################################ @@ -257,7 +394,7 @@ resource "aws_launch_template" "this" { ################################################################################ locals { - launch_template_name = try(aws_launch_template.this[0].name, var.launch_template_name) + launch_template_id = var.create && var.create_launch_template ? aws_launch_template.this[0].id : var.launch_template_id # Change order to allow users to set version priority before using defaults launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default")) } @@ -265,123 +402,215 @@ locals { resource "aws_autoscaling_group" "this" { count = var.create && var.create_autoscaling_group ? 1 : 0 - name = var.use_name_prefix ? null : var.name - name_prefix = var.use_name_prefix ? "${var.name}-" : null - - dynamic "launch_template" { - for_each = var.use_mixed_instances_policy ? 
[] : [1] - - content { - name = local.launch_template_name - version = local.launch_template_version - } - } - - availability_zones = var.availability_zones - vpc_zone_identifier = var.subnet_ids - - min_size = var.min_size - max_size = var.max_size - desired_capacity = var.desired_size + availability_zones = var.availability_zones capacity_rebalance = var.capacity_rebalance - min_elb_capacity = var.min_elb_capacity - wait_for_elb_capacity = var.wait_for_elb_capacity - wait_for_capacity_timeout = var.wait_for_capacity_timeout + context = var.context default_cooldown = var.default_cooldown - protect_from_scale_in = var.protect_from_scale_in - - target_group_arns = var.target_group_arns - placement_group = var.placement_group - health_check_type = var.health_check_type + default_instance_warmup = var.default_instance_warmup + desired_capacity = var.desired_size + enabled_metrics = var.enabled_metrics + force_delete = var.force_delete + force_delete_warm_pool = var.force_delete_warm_pool health_check_grace_period = var.health_check_grace_period - - force_delete = var.force_delete - termination_policies = var.termination_policies - suspended_processes = var.suspended_processes - max_instance_lifetime = var.max_instance_lifetime - - enabled_metrics = var.enabled_metrics - metrics_granularity = var.metrics_granularity - service_linked_role_arn = var.service_linked_role_arn + health_check_type = var.health_check_type dynamic "initial_lifecycle_hook" { for_each = var.initial_lifecycle_hooks + content { - name = initial_lifecycle_hook.value.name - default_result = lookup(initial_lifecycle_hook.value, "default_result", null) - heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null) + default_result = try(initial_lifecycle_hook.value.default_result, null) + heartbeat_timeout = try(initial_lifecycle_hook.value.heartbeat_timeout, null) lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition - notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null) - notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null) - role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null) + name = initial_lifecycle_hook.value.name + notification_metadata = try(initial_lifecycle_hook.value.notification_metadata, null) + notification_target_arn = try(initial_lifecycle_hook.value.notification_target_arn, null) + role_arn = try(initial_lifecycle_hook.value.role_arn, null) } } dynamic "instance_refresh" { for_each = length(var.instance_refresh) > 0 ? [var.instance_refresh] : [] - content { - strategy = instance_refresh.value.strategy - triggers = lookup(instance_refresh.value, "triggers", null) + content { dynamic "preferences" { - for_each = length(lookup(instance_refresh.value, "preferences", {})) > 0 ? 
[instance_refresh.value.preferences] : [] + for_each = try([instance_refresh.value.preferences], []) + content { - instance_warmup = lookup(preferences.value, "instance_warmup", null) - min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null) - checkpoint_delay = lookup(preferences.value, "checkpoint_delay", null) - checkpoint_percentages = lookup(preferences.value, "checkpoint_percentages", null) + checkpoint_delay = try(preferences.value.checkpoint_delay, null) + checkpoint_percentages = try(preferences.value.checkpoint_percentages, null) + instance_warmup = try(preferences.value.instance_warmup, null) + min_healthy_percentage = try(preferences.value.min_healthy_percentage, null) + skip_matching = try(preferences.value.skip_matching, null) } } + + strategy = instance_refresh.value.strategy + triggers = try(instance_refresh.value.triggers, null) + } + } + + dynamic "launch_template" { + for_each = var.use_mixed_instances_policy ? [] : [1] + + content { + id = local.launch_template_id + version = local.launch_template_version } } + max_instance_lifetime = var.max_instance_lifetime + max_size = var.max_size + metrics_granularity = var.metrics_granularity + min_elb_capacity = var.min_elb_capacity + min_size = var.min_size + dynamic "mixed_instances_policy" { for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : [] + content { dynamic "instances_distribution" { for_each = try([mixed_instances_policy.value.instances_distribution], []) + content { - on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null) - on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null) - on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null) - spot_allocation_strategy = lookup(instances_distribution.value, "spot_allocation_strategy", null) - spot_instance_pools = lookup(instances_distribution.value, "spot_instance_pools", null) - spot_max_price = lookup(instances_distribution.value, "spot_max_price", null) + on_demand_allocation_strategy = try(instances_distribution.value.on_demand_allocation_strategy, null) + on_demand_base_capacity = try(instances_distribution.value.on_demand_base_capacity, null) + on_demand_percentage_above_base_capacity = try(instances_distribution.value.on_demand_percentage_above_base_capacity, null) + spot_allocation_strategy = try(instances_distribution.value.spot_allocation_strategy, null) + spot_instance_pools = try(instances_distribution.value.spot_instance_pools, null) + spot_max_price = try(instances_distribution.value.spot_max_price, null) } } launch_template { launch_template_specification { - launch_template_name = local.launch_template_name - version = local.launch_template_version + launch_template_id = local.launch_template_id + version = local.launch_template_version } dynamic "override" { for_each = try(mixed_instances_policy.value.override, []) + content { - instance_type = lookup(override.value, "instance_type", null) - weighted_capacity = lookup(override.value, "weighted_capacity", null) + dynamic "instance_requirements" { + for_each = try([override.value.instance_requirements], []) + + content { + + dynamic "accelerator_count" { + for_each = try([instance_requirements.value.accelerator_count], []) + + content { + max = try(accelerator_count.value.max, null) + min = try(accelerator_count.value.min, null) + } + } + + accelerator_manufacturers = 
try(instance_requirements.value.accelerator_manufacturers, []) + accelerator_names = try(instance_requirements.value.accelerator_names, []) + + dynamic "accelerator_total_memory_mib" { + for_each = try([instance_requirements.value.accelerator_total_memory_mib], []) + + content { + max = try(accelerator_total_memory_mib.value.max, null) + min = try(accelerator_total_memory_mib.value.min, null) + } + } + + accelerator_types = try(instance_requirements.value.accelerator_types, []) + bare_metal = try(instance_requirements.value.bare_metal, null) + + dynamic "baseline_ebs_bandwidth_mbps" { + for_each = try([instance_requirements.value.baseline_ebs_bandwidth_mbps], []) + + content { + max = try(baseline_ebs_bandwidth_mbps.value.max, null) + min = try(baseline_ebs_bandwidth_mbps.value.min, null) + } + } + + burstable_performance = try(instance_requirements.value.burstable_performance, null) + cpu_manufacturers = try(instance_requirements.value.cpu_manufacturers, []) + excluded_instance_types = try(instance_requirements.value.excluded_instance_types, []) + instance_generations = try(instance_requirements.value.instance_generations, []) + local_storage = try(instance_requirements.value.local_storage, null) + local_storage_types = try(instance_requirements.value.local_storage_types, []) + + dynamic "memory_gib_per_vcpu" { + for_each = try([instance_requirements.value.memory_gib_per_vcpu], []) + + content { + max = try(memory_gib_per_vcpu.value.max, null) + min = try(memory_gib_per_vcpu.value.min, null) + } + } + + dynamic "memory_mib" { + for_each = [instance_requirements.value.memory_mib] + + content { + max = try(memory_mib.value.max, null) + min = memory_mib.value.min + } + } + + dynamic "network_interface_count" { + for_each = try([instance_requirements.value.network_interface_count], []) + + content { + max = try(network_interface_count.value.max, null) + min = try(network_interface_count.value.min, null) + } + } + + on_demand_max_price_percentage_over_lowest_price = try(instance_requirements.value.on_demand_max_price_percentage_over_lowest_price, null) + require_hibernate_support = try(instance_requirements.value.require_hibernate_support, null) + spot_max_price_percentage_over_lowest_price = try(instance_requirements.value.spot_max_price_percentage_over_lowest_price, null) + + dynamic "total_local_storage_gb" { + for_each = try([instance_requirements.value.total_local_storage_gb], []) + + content { + max = try(total_local_storage_gb.value.max, null) + min = try(total_local_storage_gb.value.min, null) + } + } + + dynamic "vcpu_count" { + for_each = [instance_requirements.value.vcpu_count] + + content { + max = try(vcpu_count.value.max, null) + min = vcpu_count.value.min + } + } + } + } + + instance_type = try(override.value.instance_type, null) dynamic "launch_template_specification" { - for_each = length(lookup(override.value, "launch_template_specification", {})) > 0 ? override.value.launch_template_specification : [] + for_each = try([override.value.launch_template_specification], []) + content { - launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null) + launch_template_id = try(launch_template_specification.value.launch_template_id, null) + version = try(launch_template_specification.value.version, null) } } + + weighted_capacity = try(override.value.weighted_capacity, null) } } } } } - dynamic "warm_pool" { - for_each = length(var.warm_pool) > 0 ? 
[var.warm_pool] : [] - content { - pool_state = lookup(warm_pool.value, "pool_state", null) - min_size = lookup(warm_pool.value, "min_size", null) - max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null) - } - } + name = var.use_name_prefix ? null : var.name + name_prefix = var.use_name_prefix ? "${var.name}-" : null + placement_group = var.placement_group + protect_from_scale_in = var.protect_from_scale_in + service_linked_role_arn = var.service_linked_role_arn + suspended_processes = var.suspended_processes dynamic "tag" { for_each = merge( @@ -390,7 +619,7 @@ resource "aws_autoscaling_group" "this" { "kubernetes.io/cluster/${var.cluster_name}" = "owned" "k8s.io/cluster/${var.cluster_name}" = "owned" }, - var.use_default_tags ? merge(data.aws_default_tags.current.tags, var.tags) : var.tags + var.tags ) content { @@ -410,6 +639,30 @@ resource "aws_autoscaling_group" "this" { } } + target_group_arns = var.target_group_arns + termination_policies = var.termination_policies + vpc_zone_identifier = var.subnet_ids + wait_for_capacity_timeout = var.wait_for_capacity_timeout + wait_for_elb_capacity = var.wait_for_elb_capacity + + dynamic "warm_pool" { + for_each = length(var.warm_pool) > 0 ? [var.warm_pool] : [] + + content { + dynamic "instance_reuse_policy" { + for_each = try([warm_pool.value.instance_reuse_policy], []) + + content { + reuse_on_scale_in = try(instance_reuse_policy.value.reuse_on_scale_in, null) + } + } + + max_group_prepared_capacity = try(warm_pool.value.max_group_prepared_capacity, null) + min_size = try(warm_pool.value.min_size, null) + pool_state = try(warm_pool.value.pool_state, null) + } + } + timeouts { delete = var.delete_timeout } @@ -432,70 +685,16 @@ resource "aws_autoscaling_schedule" "this" { scheduled_action_name = each.key autoscaling_group_name = aws_autoscaling_group.this[0].name - min_size = lookup(each.value, "min_size", null) - max_size = lookup(each.value, "max_size", null) - desired_capacity = lookup(each.value, "desired_size", null) - start_time = lookup(each.value, "start_time", null) - end_time = lookup(each.value, "end_time", null) - time_zone = lookup(each.value, "time_zone", null) + min_size = try(each.value.min_size, null) + max_size = try(each.value.max_size, null) + desired_capacity = try(each.value.desired_size, null) + start_time = try(each.value.start_time, null) + end_time = try(each.value.end_time, null) + time_zone = try(each.value.time_zone, null) # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] # Cron examples: https://crontab.guru/examples.html - recurrence = lookup(each.value, "recurrence", null) -} - -################################################################################ -# Security Group -################################################################################ - -locals { - security_group_name = coalesce(var.security_group_name, "${var.name}-node-group") - create_security_group = var.create && var.create_security_group -} - -resource "aws_security_group" "this" { - count = local.create_security_group ? 1 : 0 - - name = var.security_group_use_name_prefix ? null : local.security_group_name - name_prefix = var.security_group_use_name_prefix ? 
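The reworked `warm_pool` block above adds support for `instance_reuse_policy`. A hypothetical input under that schema:

```hcl
warm_pool = {
  pool_state                  = "Stopped"
  min_size                    = 1
  max_group_prepared_capacity = 3

  # Return scaled-in instances to the warm pool instead of terminating them
  instance_reuse_policy = {
    reuse_on_scale_in = true
  }
}
```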
"${local.security_group_name}-" : null - description = var.security_group_description - vpc_id = var.vpc_id - - tags = merge( - var.tags, - { - "Name" = local.security_group_name - }, - var.security_group_tags - ) - - # https://github.com/hashicorp/terraform-provider-aws/issues/2445 - # https://github.com/hashicorp/terraform-provider-aws/issues/9692 - lifecycle { - create_before_destroy = true - } -} - -resource "aws_security_group_rule" "this" { - for_each = { for k, v in var.security_group_rules : k => v if local.create_security_group } - - # Required - security_group_id = aws_security_group.this[0].id - protocol = each.value.protocol - from_port = each.value.from_port - to_port = each.value.to_port - type = each.value.type - - # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? var.cluster_security_group_id : null - ) + recurrence = try(each.value.recurrence, null) } ################################################################################ @@ -503,11 +702,9 @@ resource "aws_security_group_rule" "this" { ################################################################################ locals { - iam_role_name = coalesce(var.iam_role_name, "${var.name}-node-group") - + iam_role_name = coalesce(var.iam_role_name, "${var.name}-node-group") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" } data "aws_iam_policy_document" "assume_role_policy" { @@ -540,17 +737,23 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = var.create && var.create_iam_instance_profile ? toset(compact(distinct(concat([ + for_each = { for k, v in toset(compact([ "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", var.iam_role_attach_cni_policy ? local.cni_policy : "", - ], var.iam_role_additional_policies)))) : toset([]) + ])) : k => v if var.create && var.create_iam_instance_profile } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_instance_profile } policy_arn = each.value role = aws_iam_role.this[0].name } -# Only self-managed node group requires instance profile resource "aws_iam_instance_profile" "this" { count = var.create && var.create_iam_instance_profile ? 1 : 0 @@ -560,9 +763,9 @@ resource "aws_iam_instance_profile" "this" { name_prefix = var.iam_role_use_name_prefix ? 
"${local.iam_role_name}-" : null path = var.iam_role_path + tags = merge(var.tags, var.iam_role_tags) + lifecycle { create_before_destroy = true } - - tags = merge(var.tags, var.iam_role_tags) } diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf index e9f52db14f..c9c9b75254 100644 --- a/modules/self-managed-node-group/outputs.tf +++ b/modules/self-managed-node-group/outputs.tf @@ -4,22 +4,22 @@ output "launch_template_id" { description = "The ID of the launch template" - value = try(aws_launch_template.this[0].id, "") + value = try(aws_launch_template.this[0].id, null) } output "launch_template_arn" { description = "The ARN of the launch template" - value = try(aws_launch_template.this[0].arn, "") + value = try(aws_launch_template.this[0].arn, null) } output "launch_template_latest_version" { description = "The latest version of the launch template" - value = try(aws_launch_template.this[0].latest_version, "") + value = try(aws_launch_template.this[0].latest_version, null) } output "launch_template_name" { description = "The name of the launch template" - value = try(aws_launch_template.this[0].name, "") + value = try(aws_launch_template.this[0].name, null) } ################################################################################ @@ -28,57 +28,57 @@ output "launch_template_name" { output "autoscaling_group_arn" { description = "The ARN for this autoscaling group" - value = try(aws_autoscaling_group.this[0].arn, "") + value = try(aws_autoscaling_group.this[0].arn, null) } output "autoscaling_group_id" { description = "The autoscaling group id" - value = try(aws_autoscaling_group.this[0].id, "") + value = try(aws_autoscaling_group.this[0].id, null) } output "autoscaling_group_name" { description = "The autoscaling group name" - value = try(aws_autoscaling_group.this[0].name, "") + value = try(aws_autoscaling_group.this[0].name, null) } output "autoscaling_group_min_size" { description = "The minimum size of the autoscaling group" - value = try(aws_autoscaling_group.this[0].min_size, "") + value = try(aws_autoscaling_group.this[0].min_size, null) } output "autoscaling_group_max_size" { description = "The maximum size of the autoscaling group" - value = try(aws_autoscaling_group.this[0].max_size, "") + value = try(aws_autoscaling_group.this[0].max_size, null) } output "autoscaling_group_desired_capacity" { description = "The number of Amazon EC2 instances that should be running in the group" - value = try(aws_autoscaling_group.this[0].desired_capacity, "") + value = try(aws_autoscaling_group.this[0].desired_capacity, null) } output "autoscaling_group_default_cooldown" { description = "Time between a scaling activity and the succeeding scaling activity" - value = try(aws_autoscaling_group.this[0].default_cooldown, "") + value = try(aws_autoscaling_group.this[0].default_cooldown, null) } output "autoscaling_group_health_check_grace_period" { description = "Time after instance comes into service before checking health" - value = try(aws_autoscaling_group.this[0].health_check_grace_period, "") + value = try(aws_autoscaling_group.this[0].health_check_grace_period, null) } output "autoscaling_group_health_check_type" { description = "EC2 or ELB. 
Controls how health checking is done" - value = try(aws_autoscaling_group.this[0].health_check_type, "") + value = try(aws_autoscaling_group.this[0].health_check_type, null) } output "autoscaling_group_availability_zones" { description = "The availability zones of the autoscaling group" - value = try(aws_autoscaling_group.this[0].availability_zones, "") + value = try(aws_autoscaling_group.this[0].availability_zones, null) } output "autoscaling_group_vpc_zone_identifier" { description = "The VPC zone identifier" - value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "") + value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, null) } ################################################################################ @@ -90,37 +90,23 @@ output "autoscaling_group_schedule_arns" { value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } } -################################################################################ -# Security Group -################################################################################ - -output "security_group_arn" { - description = "Amazon Resource Name (ARN) of the security group" - value = try(aws_security_group.this[0].arn, "") -} - -output "security_group_id" { - description = "ID of the security group" - value = try(aws_security_group.this[0].id, "") -} - ################################################################################ # IAM Role ################################################################################ output "iam_role_name" { description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, "") + value = try(aws_iam_role.this[0].name, null) } output "iam_role_arn" { description = "The Amazon Resource Name (ARN) specifying the IAM role" - value = try(aws_iam_role.this[0].arn, "") + value = try(aws_iam_role.this[0].arn, null) } output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, "") + value = try(aws_iam_role.this[0].unique_id, null) } ################################################################################ @@ -134,12 +120,12 @@ output "iam_instance_profile_arn" { output "iam_instance_profile_id" { description = "Instance profile's ID" - value = try(aws_iam_instance_profile.this[0].id, "") + value = try(aws_iam_instance_profile.this[0].id, null) } output "iam_instance_profile_unique" { description = "Stable and unique string identifying the IAM instance profile" - value = try(aws_iam_instance_profile.this[0].unique_id, "") + value = try(aws_iam_instance_profile.this[0].unique_id, null) } ################################################################################ @@ -153,10 +139,10 @@ output "platform" { output "image_id" { description = "ID of the image" - value = try(aws_launch_template.this[0].image_id, "") + value = try(aws_launch_template.this[0].image_id, null) } output "user_data" { description = "Base64 encoded user data" - value = try(module.user_data.user_data, "") + value = try(module.user_data.user_data, null) } diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index 3734d9068c..c2850b659e 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -72,8 +72,14 @@ variable "create_launch_template" { default = true } +variable "launch_template_id" { + description = "The ID of an existing launch template to use. 
Required when `create_launch_template` = `false`"
+  type        = string
+  default     = ""
+}
+
 variable "launch_template_name" {
-  description = "Launch template name - either to be created (`var.create_launch_template` = `true`) or existing (`var.create_launch_template` = `false`)"
+  description = "Name of launch template to be created"
   type        = string
   default     = null
 }
@@ -152,7 +158,7 @@ variable "credit_specification" {
 
 variable "elastic_gpu_specifications" {
   description = "The elastic GPU to attach to the instance"
-  type        = map(string)
+  type        = any
   default     = {}
 }
 
@@ -180,9 +186,15 @@ variable "instance_market_options" {
   default     = {}
 }
 
+variable "maintenance_options" {
+  description = "The maintenance options for the instance"
+  type        = any
+  default     = {}
+}
+
 variable "license_specifications" {
-  description = "A list of license specifications to associate with"
-  type        = map(string)
+  description = "A map of license specifications to associate with the instance"
+  type        = any
   default     = {}
 }
 
@@ -198,6 +210,12 @@ variable "placement" {
   default     = {}
 }
 
+variable "private_dns_name_options" {
+  description = "The options for the instance hostname. The default values are inherited from the subnet"
+  type        = map(string)
+  default     = {}
+}
+
 variable "ebs_optimized" {
   description = "If true, the launched EC2 instance will be EBS-optimized"
   type        = bool
@@ -216,6 +234,12 @@ variable "cluster_version" {
   default     = null
 }
 
+variable "instance_requirements" {
+  description = "The attribute requirements for the type of instance. If present then `instance_type` cannot be present"
+  type        = any
+  default     = {}
+}
+
 variable "instance_type" {
   description = "The type of the instance to launch"
   type        = string
@@ -320,6 +344,12 @@ variable "desired_size" {
   default     = 1
 }
 
+variable "context" {
+  description = "Reserved"
+  type        = string
+  default     = null
+}
+
 variable "capacity_rebalance" {
   description = "Indicates whether capacity rebalance is enabled"
   type        = bool
@@ -350,6 +380,12 @@ variable "default_cooldown" {
   default     = null
 }
 
+variable "default_instance_warmup" {
+  description = "Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data"
+  type        = number
+  default     = null
+}
+
 variable "protect_from_scale_in" {
   description = "Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events."
   type        = bool
@@ -386,6 +422,12 @@ variable "force_delete" {
   default     = null
 }
 
+variable "force_delete_warm_pool" {
+  description = "Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate"
+  type        = bool
+  default     = null
+}
+
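With `instance_requirements` exposed on the self-managed node group, instance types can now be selected by attributes instead of a fixed `instance_type`. A minimal sketch of how a consumer might pass it through; the group name and all attribute values below are illustrative, not taken from this changeset:

```hcl
# Hypothetical fragment of the module's inputs - names and values are examples only
self_managed_node_groups = {
  attribute_based = {
    # `instance_requirements` and `instance_type` are mutually exclusive
    instance_requirements = {
      allowed_instance_types = ["m5.*", "m6i.*"]
      memory_mib = {
        min = 8192
      }
      vcpu_count = {
        min = 2
        max = 8
      }
    }
  }
}
```

Depending on how the Auto Scaling group consumes the launch template, a mixed instances policy may also be required; treat this as a sketch rather than a drop-in configuration.

 variable "termination_policies" {
   description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. 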
The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`" type = list(string) @@ -431,7 +473,12 @@ variable "initial_lifecycle_hooks" { variable "instance_refresh" { description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated" type = any - default = {} + default = { + strategy = "Rolling" + preferences = { + min_healthy_percentage = 66 + } + } } variable "use_mixed_instances_policy" { @@ -458,12 +505,6 @@ variable "delete_timeout" { default = null } -variable "use_default_tags" { - description = "Enables/disables the use of provider default tags in the tag_specifications of the Auto Scaling group" - type = bool - default = false -} - variable "autoscaling_group_tags" { description = "A map of additional tags to add to the autoscaling group created. Tags are applied to the autoscaling group only and are NOT propagated to instances" type = map(string) @@ -486,58 +527,6 @@ variable "schedules" { default = {} } -################################################################################ -# Security Group -################################################################################ - -variable "create_security_group" { - description = "Determines whether to create a security group" - type = bool - default = true -} - -variable "security_group_name" { - description = "Name to use on security group created" - type = string - default = null -} - -variable "security_group_use_name_prefix" { - description = "Determines whether the security group name (`security_group_name`) is used as a prefix" - type = bool - default = true -} - -variable "security_group_description" { - description = "Description for the security group created" - type = string - default = "EKS self-managed node group security group" -} - -variable "vpc_id" { - description = "ID of the VPC where the security group/nodes will be provisioned" - type = string - default = null -} - -variable "security_group_rules" { - description = "List of security group rules to add to the security group created" - type = any - default = {} -} - -variable "cluster_security_group_id" { - description = "Cluster control plane security group ID" - type = string - default = null -} - -variable "security_group_tags" { - description = "A map of additional tags to add to the security group created" - type = map(string) - default = {} -} - ################################################################################ # IAM Role ################################################################################ @@ -598,8 +587,8 @@ variable "iam_role_attach_cni_policy" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } variable "iam_role_tags" { diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index 22e8d7265f..325eee94e1 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } } } diff --git a/node_groups.tf b/node_groups.tf index 402191f162..36f071610c 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -4,6 +4,19 @@ locals { http_tokens = "required" 
http_put_response_hop_limit = 2
   }
+
+  # EKS managed node group
+  default_update_config = {
+    max_unavailable_percentage = 33
+  }
+
+  # Self-managed node group
+  default_instance_refresh = {
+    strategy = "Rolling"
+    preferences = {
+      min_healthy_percentage = 66
+    }
+  }
 }
 
 ################################################################################
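The two locals added above seed every node group with a sane rollout posture: a 33% max-unavailable update config for EKS managed groups and a rolling instance refresh for self-managed groups. Both remain overridable per group or via the `*_defaults` inputs; a hedged sketch (the module instance and values are illustrative):

```hcl
# Hypothetical consumer overrides - values are examples only
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  eks_managed_node_group_defaults = {
    # Replace the module default of 33% max unavailable with a fixed count
    update_config = {
      max_unavailable = 1
    }
  }

  self_managed_node_group_defaults = {
    # Keep rolling refreshes but require a higher healthy floor
    instance_refresh = {
      strategy = "Rolling"
      preferences = {
        min_healthy_percentage = 90
      }
    }
  }
}
```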
["::/0"] : null } - } + } : k => v if var.node_security_group_enable_recommended_rules } } resource "aws_security_group" "node" { @@ -168,7 +166,11 @@ resource "aws_security_group" "node" { } resource "aws_security_group_rule" "node" { - for_each = { for k, v in merge(local.node_security_group_rules, var.node_security_group_additional_rules) : k => v if local.create_node_sg } + for_each = { for k, v in merge( + local.node_security_group_rules, + local.node_secuirty_group_recommended_rules, + var.node_security_group_additional_rules, + ) : k => v if local.create_node_sg } # Required security_group_id = aws_security_group.node[0].id @@ -178,15 +180,13 @@ resource "aws_security_group_rule" "node" { type = each.value.type # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? local.cluster_security_group_id : null - ) + description = lookup(each.value, "description", null) + cidr_blocks = lookup(each.value, "cidr_blocks", null) + ipv6_cidr_blocks = lookup(each.value, "ipv6_cidr_blocks", null) + prefix_list_ids = lookup(each.value, "prefix_list_ids", []) + self = lookup(each.value, "self", null) + source_security_group_id = lookup(each.value, "source_security_group_id", + lookup(each.value, "source_cluster_security_group", false)) ? local.cluster_security_group_id : null } ################################################################################ @@ -196,7 +196,7 @@ resource "aws_security_group_rule" "node" { module "fargate_profile" { source = "./modules/fargate-profile" - for_each = { for k, v in var.fargate_profiles : k => v if var.create } + for_each = { for k, v in var.fargate_profiles : k => v if var.create && !local.create_outposts_local_cluster } create = try(each.value.create, true) @@ -218,7 +218,9 @@ module "fargate_profile" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.fargate_profile_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.fargate_profile_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.fargate_profile_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, []) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.fargate_profile_defaults, "iam_role_additional_policies", {})) tags = merge(var.tags, try(each.value.tags, var.fargate_profile_defaults.tags, {})) } @@ -230,14 +232,13 @@ module "fargate_profile" { module "eks_managed_node_group" { source = "./modules/eks-managed-node-group" - for_each = { for k, v in var.eks_managed_node_groups : k => v if var.create } + for_each = { for k, v in var.eks_managed_node_groups : k => v if var.create && !local.create_outposts_local_cluster } create = try(each.value.create, true) - cluster_name = aws_eks_cluster.this[0].name - cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, 
aws_eks_cluster.this[0].version) - cluster_security_group_id = local.cluster_security_group_id - cluster_ip_family = var.cluster_ip_family + cluster_name = aws_eks_cluster.this[0].name + cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version) + cluster_ip_family = var.cluster_ip_family # EKS Managed Node Group name = try(each.value.name, each.key) @@ -261,7 +262,7 @@ module "eks_managed_node_group" { remote_access = try(each.value.remote_access, var.eks_managed_node_group_defaults.remote_access, {}) taints = try(each.value.taints, var.eks_managed_node_group_defaults.taints, {}) - update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, {}) + update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, local.default_update_config) timeouts = try(each.value.timeouts, var.eks_managed_node_group_defaults.timeouts, {}) # User data @@ -276,20 +277,22 @@ module "eks_managed_node_group" { user_data_template_path = try(each.value.user_data_template_path, var.eks_managed_node_group_defaults.user_data_template_path, "") # Launch Template - create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) - launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key) - launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true) - launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null) - launch_template_description = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") - launch_template_tags = try(each.value.launch_template_tags, var.eks_managed_node_group_defaults.launch_template_tags, {}) - - ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null) - key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null) + create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) + use_custom_launch_template = try(each.value.use_custom_launch_template, var.eks_managed_node_group_defaults.use_custom_launch_template, true) + launch_template_id = try(each.value.launch_template_id, var.eks_managed_node_group_defaults.launch_template_id, "") + launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key) + launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true) + launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null) launch_template_default_version = try(each.value.launch_template_default_version, var.eks_managed_node_group_defaults.launch_template_default_version, null) update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.eks_managed_node_group_defaults.update_launch_template_default_version, true) - disable_api_termination = try(each.value.disable_api_termination, 
var.eks_managed_node_group_defaults.disable_api_termination, null) - kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null) - ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null) + launch_template_description = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") + launch_template_tags = try(each.value.launch_template_tags, var.eks_managed_node_group_defaults.launch_template_tags, {}) + + ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null) + key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null) + disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null) + kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null) block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, {}) capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.eks_managed_node_group_defaults.capacity_reservation_specification, {}) @@ -304,6 +307,8 @@ module "eks_managed_node_group" { enable_monitoring = try(each.value.enable_monitoring, var.eks_managed_node_group_defaults.enable_monitoring, true) network_interfaces = try(each.value.network_interfaces, var.eks_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.eks_managed_node_group_defaults.placement, {}) + maintenance_options = try(each.value.maintenance_options, var.eks_managed_node_group_defaults.maintenance_options, {}) + private_dns_name_options = try(each.value.private_dns_name_options, var.eks_managed_node_group_defaults.private_dns_name_options, {}) # IAM role create_iam_role = try(each.value.create_iam_role, var.eks_managed_node_group_defaults.create_iam_role, true) @@ -315,18 +320,13 @@ module "eks_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.eks_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.eks_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.eks_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, []) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.eks_managed_node_group_defaults, "iam_role_additional_policies", {})) # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, []))) cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.eks_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? 
aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null
-  create_security_group          = try(each.value.create_security_group, var.eks_managed_node_group_defaults.create_security_group, true)
-  security_group_name            = try(each.value.security_group_name, var.eks_managed_node_group_defaults.security_group_name, null)
-  security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.eks_managed_node_group_defaults.security_group_use_name_prefix, true)
-  security_group_description     = try(each.value.security_group_description, var.eks_managed_node_group_defaults.security_group_description, "EKS managed node group security group")
-  vpc_id                         = try(each.value.vpc_id, var.eks_managed_node_group_defaults.vpc_id, var.vpc_id)
-  security_group_rules           = try(each.value.security_group_rules, var.eks_managed_node_group_defaults.security_group_rules, {})
-  security_group_tags            = try(each.value.security_group_tags, var.eks_managed_node_group_defaults.security_group_tags, {})
 
   tags = merge(var.tags, try(each.value.tags, var.eks_managed_node_group_defaults.tags, {}))
 }
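With the per-node-group security groups removed, any custom reachability requirements now belong on the shared node security group. A hedged sketch of adding a rule at the cluster level; the rule key, ports, and description are illustrative:

```hcl
# Hypothetical fragment of the module's inputs - a custom ingress rule on the
# shared node security group, sourced from the cluster control plane
node_security_group_additional_rules = {
  ingress_cluster_metrics_server = {
    description                   = "Cluster API to node group metrics-server"
    protocol                      = "tcp"
    from_port                     = 4443
    to_port                       = 4443
    type                          = "ingress"
    source_cluster_security_group = true
  }
}
```

If per-group isolation is still required, a security group can be created outside the module and appended to that group's `vpc_security_group_ids`.

@@ -362,24 +362,27 @@
   wait_for_elb_capacity     = try(each.value.wait_for_elb_capacity, var.self_managed_node_group_defaults.wait_for_elb_capacity, null)
   wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_managed_node_group_defaults.wait_for_capacity_timeout, null)
   default_cooldown          = try(each.value.default_cooldown, var.self_managed_node_group_defaults.default_cooldown, null)
+  default_instance_warmup   = try(each.value.default_instance_warmup, var.self_managed_node_group_defaults.default_instance_warmup, null)
   protect_from_scale_in     = try(each.value.protect_from_scale_in, var.self_managed_node_group_defaults.protect_from_scale_in, null)
+  context                   = try(each.value.context, var.self_managed_node_group_defaults.context, null)
 
   target_group_arns         = try(each.value.target_group_arns, var.self_managed_node_group_defaults.target_group_arns, [])
   placement_group           = try(each.value.placement_group, var.self_managed_node_group_defaults.placement_group, null)
   health_check_type         = try(each.value.health_check_type, var.self_managed_node_group_defaults.health_check_type, null)
   health_check_grace_period = try(each.value.health_check_grace_period, var.self_managed_node_group_defaults.health_check_grace_period, null)
 
-  force_delete          = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
-  termination_policies  = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, [])
-  suspended_processes   = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, [])
-  max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
+  force_delete           = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
+  force_delete_warm_pool = try(each.value.force_delete_warm_pool, var.self_managed_node_group_defaults.force_delete_warm_pool, null)
+  termination_policies   = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, [])
+  suspended_processes    = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, [])
+  max_instance_lifetime  = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
 
   enabled_metrics           = try(each.value.enabled_metrics, var.self_managed_node_group_defaults.enabled_metrics, 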
[]) metrics_granularity = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null) service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_managed_node_group_defaults.service_linked_role_arn, null) initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, []) - instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, {}) + instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh) use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false) mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null) warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {}) @@ -388,7 +391,6 @@ module "self_managed_node_group" { schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null) - use_default_tags = try(each.value.use_default_tags, var.self_managed_node_group_defaults.use_default_tags, false) autoscaling_group_tags = try(each.value.autoscaling_group_tags, var.self_managed_node_group_defaults.autoscaling_group_tags, {}) # User data @@ -401,12 +403,15 @@ module "self_managed_node_group" { user_data_template_path = try(each.value.user_data_template_path, var.self_managed_node_group_defaults.user_data_template_path, "") # Launch Template - create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true) - launch_template_name = try(each.value.launch_template_name, var.self_managed_node_group_defaults.launch_template_name, each.key) - launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.self_managed_node_group_defaults.launch_template_use_name_prefix, true) - launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null) - launch_template_description = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group") - launch_template_tags = try(each.value.launch_template_tags, var.self_managed_node_group_defaults.launch_template_tags, {}) + create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true) + launch_template_id = try(each.value.launch_template_id, var.self_managed_node_group_defaults.launch_template_id, "") + launch_template_name = try(each.value.launch_template_name, var.self_managed_node_group_defaults.launch_template_name, each.key) + launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.self_managed_node_group_defaults.launch_template_use_name_prefix, true) + launch_template_version = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null) + launch_template_default_version = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null) + update_launch_template_default_version = 
try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true) + launch_template_description = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group") + launch_template_tags = try(each.value.launch_template_tags, var.self_managed_node_group_defaults.launch_template_tags, {}) ebs_optimized = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null) ami_id = try(each.value.ami_id, var.self_managed_node_group_defaults.ami_id, "") @@ -414,12 +419,10 @@ module "self_managed_node_group" { instance_type = try(each.value.instance_type, var.self_managed_node_group_defaults.instance_type, "m6i.large") key_name = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null) - launch_template_default_version = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null) - update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true) - disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null) - instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null) - kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null) - ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null) + disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null) + instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null) + kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null) block_device_mappings = try(each.value.block_device_mappings, var.self_managed_node_group_defaults.block_device_mappings, {}) capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_managed_node_group_defaults.capacity_reservation_specification, {}) @@ -429,12 +432,15 @@ module "self_managed_node_group" { elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.self_managed_node_group_defaults.elastic_inference_accelerator, {}) enclave_options = try(each.value.enclave_options, var.self_managed_node_group_defaults.enclave_options, {}) hibernation_options = try(each.value.hibernation_options, var.self_managed_node_group_defaults.hibernation_options, {}) + instance_requirements = try(each.value.instance_requirements, var.self_managed_node_group_defaults.instance_requirements, {}) instance_market_options = try(each.value.instance_market_options, var.self_managed_node_group_defaults.instance_market_options, {}) license_specifications = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, {}) metadata_options = try(each.value.metadata_options, var.self_managed_node_group_defaults.metadata_options, local.metadata_options) enable_monitoring = 
try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, true) network_interfaces = try(each.value.network_interfaces, var.self_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.self_managed_node_group_defaults.placement, {}) + maintenance_options = try(each.value.maintenance_options, var.self_managed_node_group_defaults.maintenance_options, {}) + private_dns_name_options = try(each.value.private_dns_name_options, var.self_managed_node_group_defaults.private_dns_name_options, {}) # IAM role create_iam_instance_profile = try(each.value.create_iam_instance_profile, var.self_managed_node_group_defaults.create_iam_instance_profile, true) @@ -446,19 +452,13 @@ module "self_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.self_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.self_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.self_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, []) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.self_managed_node_group_defaults, "iam_role_additional_policies", {})) # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) - cluster_security_group_id = local.cluster_security_group_id cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.self_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? 
aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null - create_security_group = try(each.value.create_security_group, var.self_managed_node_group_defaults.create_security_group, true) - security_group_name = try(each.value.security_group_name, var.self_managed_node_group_defaults.security_group_name, null) - security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.self_managed_node_group_defaults.security_group_use_name_prefix, true) - security_group_description = try(each.value.security_group_description, var.self_managed_node_group_defaults.security_group_description, "Self managed node group security group") - vpc_id = try(each.value.vpc_id, var.self_managed_node_group_defaults.vpc_id, var.vpc_id) - security_group_rules = try(each.value.security_group_rules, var.self_managed_node_group_defaults.security_group_rules, {}) - security_group_tags = try(each.value.security_group_tags, var.self_managed_node_group_defaults.security_group_tags, {}) tags = merge(var.tags, try(each.value.tags, var.self_managed_node_group_defaults.tags, {})) } diff --git a/outputs.tf b/outputs.tf index eadf2d0cd2..dec0540174 100644 --- a/outputs.tf +++ b/outputs.tf @@ -4,52 +4,52 @@ output "cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster" - value = try(aws_eks_cluster.this[0].arn, "") + value = try(aws_eks_cluster.this[0].arn, null) } output "cluster_certificate_authority_data" { description = "Base64 encoded certificate data required to communicate with the cluster" - value = try(aws_eks_cluster.this[0].certificate_authority[0].data, "") + value = try(aws_eks_cluster.this[0].certificate_authority[0].data, null) } output "cluster_endpoint" { description = "Endpoint for your Kubernetes API server" - value = try(aws_eks_cluster.this[0].endpoint, "") + value = try(aws_eks_cluster.this[0].endpoint, null) } -output "cluster_name" { - description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = try(aws_eks_cluster.this[0].name, "") +output "cluster_id" { + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" + value = try(aws_eks_cluster.this[0].cluster_id, null) } -output "cluster_id" { - description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = try(aws_eks_cluster.this[0].id, "") +output "cluster_name" { + description = "The name of the EKS cluster" + value = try(aws_eks_cluster.this[0].name, null) } output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "") + value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null) } output "cluster_version" { description = "The Kubernetes version for the cluster" - value = try(aws_eks_cluster.this[0].version, "") + value = try(aws_eks_cluster.this[0].version, null) } output "cluster_platform_version" { description = "Platform version for the cluster" - value = try(aws_eks_cluster.this[0].platform_version, "") + value = try(aws_eks_cluster.this[0].platform_version, null) } output "cluster_status" { description = "Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
-  value       = try(aws_eks_cluster.this[0].status, "")
+  value       = try(aws_eks_cluster.this[0].status, null)
 }
 
 output "cluster_primary_security_group_id" {
   description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
-  value       = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
+  value       = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, null)
 }
 
 ################################################################################
@@ -77,12 +77,12 @@ output "kms_key_policy" {
 
 output "cluster_security_group_arn" {
   description = "Amazon Resource Name (ARN) of the cluster security group"
-  value       = try(aws_security_group.cluster[0].arn, "")
+  value       = try(aws_security_group.cluster[0].arn, null)
 }
 
 output "cluster_security_group_id" {
   description = "ID of the cluster security group"
-  value       = try(aws_security_group.cluster[0].id, "")
+  value       = try(aws_security_group.cluster[0].id, null)
 }
 
 ################################################################################
@@ -91,12 +91,12 @@ output "cluster_security_group_id" {
 
 output "node_security_group_arn" {
   description = "Amazon Resource Name (ARN) of the node shared security group"
-  value       = try(aws_security_group.node[0].arn, "")
+  value       = try(aws_security_group.node[0].arn, null)
 }
 
 output "node_security_group_id" {
   description = "ID of the node shared security group"
-  value       = try(aws_security_group.node[0].id, "")
+  value       = try(aws_security_group.node[0].id, null)
 }
 
 ################################################################################
@@ -105,17 +105,17 @@ output "node_security_group_id" {
 
 output "oidc_provider" {
   description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
-  value       = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", ""), "")
+  value       = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", ""), null)
 }
 
 output "oidc_provider_arn" {
   description = "The ARN of the OIDC Provider if `enable_irsa = true`"
-  value       = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, "")
+  value       = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, null)
 }
 
 output "cluster_tls_certificate_sha1_fingerprint" {
   description = "The SHA1 fingerprint of the public key of the cluster's certificate"
-  value       = try(data.tls_certificate.this[0].certificates[0].sha1_fingerprint, "")
+  value       = try(data.tls_certificate.this[0].certificates[0].sha1_fingerprint, null)
 }
 
 ################################################################################
@@ -124,17 +124,17 @@ output "cluster_tls_certificate_sha1_fingerprint" {
 
 output "cluster_iam_role_name" {
   description = "IAM role name of the EKS cluster"
-  value       = try(aws_iam_role.this[0].name, "")
+  value       = try(aws_iam_role.this[0].name, null)
 }
 
 output "cluster_iam_role_arn" {
   description = "IAM role ARN of the EKS cluster"
-  value       = try(aws_iam_role.this[0].arn, "")
+  value       = try(aws_iam_role.this[0].arn, null)
 }
 
 output "cluster_iam_role_unique_id" {
   description = "Stable and unique string identifying the IAM role"
-  value       = try(aws_iam_role.this[0].unique_id, "")
+  value       = try(aws_iam_role.this[0].unique_id, null)
 }
 
 ################################################################################
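Beyond the `""` to `null` fallback changes, note the breaking swap above: `cluster_id` now surfaces the attribute that is only populated for Outposts local clusters, and `cluster_name` is what downstream wiring should reference. A hedged sketch of provider wiring against the renamed outputs; the `module.eks` instance and exec-based auth are illustrative:

```hcl
# Hypothetical provider configuration - assumes this module is instantiated as `module.eks`
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # v18 configurations often used `cluster_id` here; in v19 that output is
    # reserved for Outposts local clusters, so reference `cluster_name` instead
    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
  }
}
```

Since outputs now fall back to `null` rather than `""`, consumers chaining them through `coalesce()` or conditionals should expect `null` when the underlying resource is not created.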
"cloudwatch_log_group_name" { description = "Name of cloudwatch log group created" - value = try(aws_cloudwatch_log_group.this[0].name, "") + value = try(aws_cloudwatch_log_group.this[0].name, null) } output "cloudwatch_log_group_arn" { description = "Arn of cloudwatch log group created" - value = try(aws_cloudwatch_log_group.this[0].arn, "") + value = try(aws_cloudwatch_log_group.this[0].arn, null) } ################################################################################ diff --git a/variables.tf b/variables.tf index 6bce0560e2..d0047ff1c0 100644 --- a/variables.tf +++ b/variables.tf @@ -59,13 +59,13 @@ variable "subnet_ids" { variable "cluster_endpoint_private_access" { description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled" type = bool - default = false + default = true } variable "cluster_endpoint_public_access" { description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled" type = bool - default = true + default = false } variable "cluster_endpoint_public_access_cidrs" { @@ -86,10 +86,24 @@ variable "cluster_service_ipv4_cidr" { default = null } +variable "cluster_service_ipv6_cidr" { + description = "The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster" + type = string + default = null +} + +variable "outpost_config" { + description = "Configuration for the AWS Outpost to provision the cluster on" + type = any + default = {} +} + variable "cluster_encryption_config" { description = "Configuration block with encryption configuration for the cluster" - type = list(any) - default = [] + type = any + default = { + resources = ["secrets"] + } } variable "attach_cluster_encryption_policy" { @@ -123,7 +137,7 @@ variable "cluster_timeouts" { variable "create_kms_key" { description = "Controls if a KMS key for cluster encryption should be created" type = bool - default = false + default = true } variable "kms_key_description" { @@ -219,19 +233,19 @@ variable "cloudwatch_log_group_kms_key_id" { ################################################################################ variable "create_cluster_security_group" { - description = "Determines if a security group is created for the cluster or use the existing `cluster_security_group_id`" + description = "Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default" type = bool default = true } variable "cluster_security_group_id" { - description = "Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false`" + description = "Existing security group ID to be attached to the cluster" type = string default = "" } variable "vpc_id" { - description = "ID of the VPC where the cluster and its nodes will be provisioned" + description = "ID of the VPC where the cluster security group will be provisioned" type = string default = null } @@ -316,26 +330,18 @@ variable "node_security_group_additional_rules" { default = {} } +variable "node_security_group_enable_recommended_rules" { + description = "Determines whether to enable recommended security group rules for the node security group created. 
This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic" + type = bool + default = true +} + variable "node_security_group_tags" { description = "A map of additional tags to add to the node security group created" type = map(string) default = {} } -# TODO - at next breaking change, make 169.254.169.123/32 the default -variable "node_security_group_ntp_ipv4_cidr_block" { - description = "IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"169.254.169.123/32\"]`" - type = list(string) - default = ["0.0.0.0/0"] -} - -# TODO - at next breaking change, make fd00:ec2::123/128 the default -variable "node_security_group_ntp_ipv6_cidr_block" { - description = "IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"fd00:ec2::123/128\"]`" - type = list(string) - default = ["::/0"] -} - ################################################################################ # IRSA ################################################################################ @@ -406,8 +412,8 @@ variable "iam_role_permissions_boundary" { variable "iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } # TODO - hopefully this can be removed once the AWS endpoint is named properly in China @@ -464,6 +470,12 @@ variable "cluster_addons" { default = {} } +variable "cluster_addons_timeouts" { + description = "Create, update, and delete timeout configurations for the cluster addons" + type = map(string) + default = {} +} + ################################################################################ # EKS Identity Provider ################################################################################ diff --git a/versions.tf b/versions.tf index fde7af0f23..fdc407c2ed 100644 --- a/versions.tf +++ b/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 4.45" } tls = { source = "hashicorp/tls"