From 99d289988df56cd8469a2e17f2bfb220dc87b701 Mon Sep 17 00:00:00 2001 From: Dawid Rogaczewski <20227477+daroga0002@users.noreply.github.com> Date: Tue, 12 Oct 2021 15:20:14 +0200 Subject: [PATCH] fix: Rebuild examples (#1625) --- README.md | 14 +- examples/_bootstrap/main.tf | 50 ----- examples/_bootstrap/outputs.tf | 14 -- examples/_bootstrap/variables.tf | 0 examples/_bootstrap/versions.tf | 9 - examples/bottlerocket/README.md | 12 +- examples/bottlerocket/main.tf | 102 ++++++++-- examples/bottlerocket/outputs.tf | 5 - examples/bottlerocket/versions.tf | 8 +- examples/complete/README.md | 9 +- examples/complete/main.tf | 94 ++++++--- examples/complete/outputs.tf | 5 - examples/complete/variables.tf | 1 - examples/complete/versions.tf | 2 +- examples/fargate/README.md | 14 +- examples/fargate/main.tf | 155 ++++++++------- examples/fargate/versions.tf | 2 +- examples/instance_refresh/README.md | 82 ++++++++ examples/instance_refresh/main.tf | 160 +++++++++------ examples/instance_refresh/variables.tf | 17 -- examples/instance_refresh/versions.tf | 4 +- examples/irsa/README.md | 93 ++++----- .../irsa/cluster-autoscaler-chart-values.yaml | 14 -- examples/irsa/irsa.tf | 58 +++++- examples/irsa/locals.tf | 5 - examples/irsa/main.tf | 120 ++++++++---- examples/irsa/versions.tf | 3 +- .../README.md | 19 +- examples/launch_templates/main.tf | 119 +++++++---- examples/launch_templates/outputs.tf | 38 ++-- examples/launch_templates/pre_userdata.sh | 2 +- examples/launch_templates/versions.tf | 3 +- .../README.md | 70 +++++++ .../launchtemplate.tf | 12 +- .../main.tf | 139 +++++++++---- .../variables.tf | 6 - .../versions.tf | 2 +- examples/managed_node_groups/README.md | 73 +++++++ examples/managed_node_groups/main.tf | 184 ++++++++++-------- examples/managed_node_groups/versions.tf | 4 +- examples/secrets_encryption/README.md | 66 +++++++ examples/secrets_encryption/main.tf | 126 +++++++----- examples/secrets_encryption/variables.tf | 48 ----- 
examples/secrets_encryption/versions.tf | 2 +- variables.tf | 6 +- 45 files changed, 1272 insertions(+), 699 deletions(-) delete mode 100644 examples/_bootstrap/main.tf delete mode 100644 examples/_bootstrap/outputs.tf delete mode 100644 examples/_bootstrap/variables.tf delete mode 100644 examples/_bootstrap/versions.tf create mode 100644 examples/instance_refresh/README.md delete mode 100644 examples/irsa/cluster-autoscaler-chart-values.yaml delete mode 100644 examples/irsa/locals.tf rename examples/{_bootstrap => launch_templates}/README.md (53%) create mode 100644 examples/launch_templates_with_managed_node_groups/README.md create mode 100644 examples/managed_node_groups/README.md create mode 100644 examples/secrets_encryption/README.md diff --git a/README.md b/README.md index 1e5399e672..ab4fe8b1ec 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ Terraform module which creates Kubernetes cluster resources on AWS EKS. - Support AWS EKS Optimized or Custom AMI - Create or manage security groups that allow communication and coordination - ## Important note Kubernetes is evolving a lot, and each minor version includes new features, fixes, or changes. @@ -24,7 +23,6 @@ Kubernetes is evolving a lot, and each minor version includes new features, fixe You also need to ensure that your applications and add ons are updated, or workloads could fail after the upgrade is complete. For action, you may need to take before upgrading, see the steps in the [EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html). - ## Usage example ```hcl @@ -61,7 +59,6 @@ module "eks" { There is also a [complete example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) which shows large set of features available in the module. 
- ## Submodules Root module calls these modules which can also be used separately to create independent resources: @@ -71,14 +68,12 @@ Root module calls these modules which can also be used separately to create inde - [node_groups](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/node_groups) - creates Managed Node Group resources --> - ## Notes - By default, this module manages the `aws-auth` configmap for you (`manage_aws_auth=true`). To avoid the following [issue](https://github.com/aws/containers-roadmap/issues/654) where the EKS creation is `ACTIVE` but not ready. We implemented a "retry" logic with a [fork of the http provider](https://github.com/terraform-aws-modules/terraform-provider-http). This fork adds the support of a self-signed CA certificate. The original PR can be found [here](https://github.com/hashicorp/terraform-provider-http/pull/29). - Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. Find the complete example here [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh). - ## Documentation ### Official docs @@ -93,7 +88,6 @@ Root module calls these modules which can also be used separately to create inde - [IAM Permissions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md): Minimum IAM permissions needed to setup EKS Cluster. - [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md): Frequently Asked Questions - ## Examples There are detailed examples available for you to see how certain features of this module can be used in a straightforward way. Make sure to check them and run them before opening an issue. 
[Here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md) you can find the list of the minimum IAM Permissions required to create EKS cluster. @@ -102,14 +96,12 @@ There are detailed examples available for you to see how certain features of thi - [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket) - Create EKS cluster using [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html). - [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) - Create EKS cluster with [Fargate profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) and attach Fargate profiles to an existing EKS cluster. - ## Contributing Report issues/questions/feature requests on in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section. Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md). - ## Authors This module has been originally created by [Brandon O'Connor](https://github.com/brandoconnor), and was maintained by [Max Williams](https://github.com/max-rocket-internet), [Thierno IB. BARRY](https://github.com/barryib) and many more [contributors listed here](https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors)! @@ -243,9 +235,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf | [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file. | `bool` | `true` | no | | [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, cluster\_iam\_role\_name must be specified. 
| `bool` | `true` | no | | [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no | -| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | `list(string)` | `[]` | no | -| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | -| [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format. |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | +| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | `[]` | no | +| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | +| [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | | [node\_groups](#input\_node\_groups) | Map of map of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no | | [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider. | `list(string)` | `[]` | no | diff --git a/examples/_bootstrap/main.tf b/examples/_bootstrap/main.tf deleted file mode 100644 index 8492741f6b..0000000000 --- a/examples/_bootstrap/main.tf +++ /dev/null @@ -1,50 +0,0 @@ -provider "aws" { - region = local.region -} - -locals { - region = "eu-west-1" - name = "bootstrap-example" - vpc_cidr = "10.0.0.0/16" - - cluster_name = "test-eks-${random_string.suffix.result}" -} - -data "aws_availability_zones" "available" {} - -resource "random_string" "suffix" { - length = 8 - special = false -} - -################################################################################ -# Supporting Resources -################################################################################ - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.name - cidr = "10.0.0.0/16" - - azs = data.aws_availability_zones.available.names - public_subnets = [for k, v in data.aws_availability_zones.available.names : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in data.aws_availability_zones.available.names : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - # NAT Gateway is disabled in the examples primarily to save costs and be able to recreate VPC faster. 
- enable_nat_gateway = false - single_nat_gateway = false - - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - } -} diff --git a/examples/_bootstrap/outputs.tf b/examples/_bootstrap/outputs.tf deleted file mode 100644 index 87a2e49621..0000000000 --- a/examples/_bootstrap/outputs.tf +++ /dev/null @@ -1,14 +0,0 @@ -output "region" { - description = "AWS region" - value = local.region -} - -output "cluster_name" { - description = "Name of EKS Cluster used in tags for subnets" - value = local.cluster_name -} - -output "vpc" { - description = "Complete output of VPC module" - value = module.vpc -} diff --git a/examples/_bootstrap/variables.tf b/examples/_bootstrap/variables.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/_bootstrap/versions.tf b/examples/_bootstrap/versions.tf deleted file mode 100644 index 5539f13af9..0000000000 --- a/examples/_bootstrap/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_version = ">= 0.13.1" - - required_providers { - aws = ">= 3.22.0" - random = ">= 2.1" - kubernetes = ">= 1.11" - } -} diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md index f51a0b6952..e02687da98 100644 --- a/examples/bottlerocket/README.md +++ b/examples/bottlerocket/README.md @@ -1,6 +1,6 @@ # AWS EKS cluster running Bottlerocket AMI -Configuration in this directory creates EKS cluster with nodes running [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket) +Configuration in this directory creates EKS cluster with workers group running [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket) This is a minimalistic example which shows what knobs to turn to make Bottlerocket work. 
@@ -25,6 +25,8 @@ Note that this example may create resources which cost money. Run `terraform des |------|---------| | [terraform](#requirement\_terraform) | >= 0.13.1 | | [aws](#requirement\_aws) | >= 3.22.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | | [random](#requirement\_random) | >= 2.1 | | [tls](#requirement\_tls) | >= 2.0 | @@ -41,6 +43,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Source | Version | |------|--------|---------| | [eks](#module\_eks) | ../.. | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources @@ -51,8 +54,10 @@ Note that this example may create resources which cost money. Run `terraform des | [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | | [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | -| [aws_subnet_ids.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet_ids) | data source | -| [aws_vpc.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs @@ -67,5 +72,4 @@ No inputs. 
| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | | [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | | [node\_groups](#output\_node\_groups) | Outputs from node groups | -| [region](#output\_region) | AWS region. | diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf index 9631ef5bc7..bffa83238b 100644 --- a/examples/bottlerocket/main.tf +++ b/examples/bottlerocket/main.tf @@ -3,21 +3,30 @@ provider "aws" { } locals { - region = "eu-west-1" - k8s_version = "1.21" + name = "bottlerocket-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" } +################################################################################ +# EKS Module +################################################################################ + module "eks" { source = "../.." - cluster_name = "bottlerocket-${random_string.suffix.result}" - cluster_version = local.k8s_version + cluster_name = local.name + cluster_version = local.cluster_version - vpc_id = data.aws_vpc.default.id - subnets = data.aws_subnet_ids.default.ids + vpc_id = module.vpc.vpc_id + subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]] + fargate_subnets = [module.vpc.private_subnets[2]] + + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true write_kubeconfig = false - manage_aws_auth = false + manage_aws_auth = true worker_groups_launch_template = [ { @@ -40,7 +49,7 @@ module "eks" { userdata_template_extra_args = { enable_admin_container = false enable_control_container = true - aws_region = local.region + aws_region = data.aws_region.current.name } # example of k8s/kubelet configuration via additional_userdata additional_userdata = < [terraform](#requirement\_terraform) | >= 0.13.1 | | [aws](#requirement\_aws) | >= 3.22.0 | -| [kubernetes](#requirement\_kubernetes) | >= 1.11 | +| 
[kubernetes](#requirement\_kubernetes) | ~> 2.0 | | [local](#requirement\_local) | >= 1.4 | | [random](#requirement\_random) | >= 2.1 | @@ -34,7 +34,7 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 3.22.0 | -| [terraform](#provider\_terraform) | n/a | +| [random](#provider\_random) | >= 2.1 | ## Modules @@ -44,6 +44,7 @@ Note that this example may create resources which cost money. Run `terraform des | [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | | | [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | | | [eks](#module\_eks) | ../.. | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources @@ -52,9 +53,10 @@ Note that this example may create resources which cost money. Run `terraform des | [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.worker_group_mgmt_one](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.worker_group_mgmt_two](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | | [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| 
[terraform_remote_state.bootstrap](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | ## Inputs @@ -69,5 +71,4 @@ No inputs. | [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | | [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | | [node\_groups](#output\_node\_groups) | Outputs from node groups | -| [region](#output\_region) | AWS region. | diff --git a/examples/complete/main.tf b/examples/complete/main.tf index c0e2c8b679..ba762e8e03 100644 --- a/examples/complete/main.tf +++ b/examples/complete/main.tf @@ -2,15 +2,30 @@ provider "aws" { region = local.region } +locals { + name = "complete-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" +} + +################################################################################ +# EKS Module +################################################################################ + module "eks" { source = "../.." 
- cluster_name = local.cluster_name - cluster_version = "1.21" + cluster_name = local.name + cluster_version = local.cluster_version + + vpc_id = module.vpc.vpc_id + subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]] + fargate_subnets = [module.vpc.private_subnets[2]] + + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true + - vpc_id = local.vpc.vpc_id - subnets = [local.vpc.private_subnets[0], local.vpc.public_subnets[1]] - fargate_subnets = [local.vpc.private_subnets[2]] worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id] @@ -130,15 +145,15 @@ module "eks" { ] tags = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } } -#################### +################################################################################ # Disabled creation -#################### +################################################################################ module "disabled_eks" { source = "../.." 
@@ -158,9 +173,9 @@ module "disabled_node_groups" { create_eks = false } -############# -# Kubernetes -############# +################################################################################ +# Kubernetes provider configuration +################################################################################ data "aws_eks_cluster" "cluster" { name = module.eks.cluster_id @@ -177,12 +192,12 @@ provider "kubernetes" { } ################################################################################ -# Supporting resources +# Additional security groups for workers ################################################################################ resource "aws_security_group" "worker_group_mgmt_one" { name_prefix = "worker_group_mgmt_one" - vpc_id = local.vpc.vpc_id + vpc_id = module.vpc.vpc_id ingress { from_port = 22 @@ -197,7 +212,7 @@ resource "aws_security_group" "worker_group_mgmt_one" { resource "aws_security_group" "worker_group_mgmt_two" { name_prefix = "worker_group_mgmt_two" - vpc_id = local.vpc.vpc_id + vpc_id = module.vpc.vpc_id ingress { from_port = 22 @@ -212,7 +227,7 @@ resource "aws_security_group" "worker_group_mgmt_two" { resource "aws_security_group" "all_worker_mgmt" { name_prefix = "all_worker_management" - vpc_id = local.vpc.vpc_id + vpc_id = module.vpc.vpc_id ingress { from_port = 22 @@ -227,21 +242,44 @@ resource "aws_security_group" "all_worker_mgmt" { } } - ################################################################################ -# Supporting resources (managed in "_bootstrap" directory) +# Supporting resources ################################################################################ -data "terraform_remote_state" "bootstrap" { - backend = "local" +data "aws_availability_zones" "available" { +} - config = { - path = "../_bootstrap/terraform.tfstate" - } +resource "random_string" "suffix" { + length = 8 + special = false } -locals { - region = data.terraform_remote_state.bootstrap.outputs.region - cluster_name = 
data.terraform_remote_state.bootstrap.outputs.cluster_name - vpc = data.terraform_remote_state.bootstrap.outputs.vpc +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + azs = data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } } diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf index 356410219f..10a3a96604 100644 --- a/examples/complete/outputs.tf +++ b/examples/complete/outputs.tf @@ -1,8 +1,3 @@ -output "region" { - description = "AWS region." - value = local.region -} - output "cluster_endpoint" { description = "Endpoint for EKS control plane." 
value = module.eks.cluster_endpoint diff --git a/examples/complete/variables.tf b/examples/complete/variables.tf index 8b13789179..e69de29bb2 100644 --- a/examples/complete/variables.tf +++ b/examples/complete/variables.tf @@ -1 +0,0 @@ - diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 120d873e2f..bbcf893252 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -5,6 +5,6 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = ">= 1.11" + kubernetes = "~> 2.0" } } diff --git a/examples/fargate/README.md b/examples/fargate/README.md index 6abd31b524..b4e79abdb7 100644 --- a/examples/fargate/README.md +++ b/examples/fargate/README.md @@ -1,8 +1,9 @@ # AWS EKS Cluster with Fargate profiles Configuration in this directory creates EKS cluster with Fargate profiles in two different ways: + - Using a root module, where EKS Cluster and Fargate profiles should be created at once. This is the default behaviour for most users. -- Using `modules/fargate` submodule where Fargate profiles should be attached to the barebone EKS Cluster. +- Using `modules/fargate` submodule where Fargate profiles should be attached to the existing EKS Cluster. ## Usage @@ -23,7 +24,7 @@ Note that this example may create resources which cost money. Run `terraform des |------|---------| | [terraform](#requirement\_terraform) | >= 0.13.1 | | [aws](#requirement\_aws) | >= 3.22.0 | -| [kubernetes](#requirement\_kubernetes) | >= 1.11 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | | [local](#requirement\_local) | >= 1.4 | | [random](#requirement\_random) | >= 2.1 | @@ -32,25 +33,24 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 3.22.0 | -| [terraform](#provider\_terraform) | n/a | +| [random](#provider\_random) | >= 2.1 | ## Modules | Name | Source | Version | |------|--------|---------| -| [barebone\_eks](#module\_barebone\_eks) | ../.. | | | [eks](#module\_eks) | ../.. | | | [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources | Name | Type | |------|------| -| [aws_eks_cluster.barebone](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.barebone](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | | [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [terraform_remote_state.bootstrap](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | ## Inputs diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf index 0901843445..869ed7e991 100644 --- a/examples/fargate/main.tf +++ b/examples/fargate/main.tf @@ -2,15 +2,50 @@ provider "aws" { region = local.region } +locals { + name = "fargate-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" +} + +################################################################################ +# EKS 
Module +################################################################################ + module "eks" { source = "../.." - cluster_name = local.cluster_name - cluster_version = "1.21" + cluster_name = local.name + cluster_version = local.cluster_version + + vpc_id = module.vpc.vpc_id + subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]] + fargate_subnets = [module.vpc.private_subnets[2]] + + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true - vpc_id = local.vpc.vpc_id - subnets = [local.vpc.private_subnets[0], local.vpc.public_subnets[1]] - fargate_subnets = [local.vpc.private_subnets[2]] + # You require a node group to schedule coredns which is critical for running correctly internal DNS. + # If you want to use only fargate you must follow docs `(Optional) Update CoreDNS` + # available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html + node_groups = { + example = { + desired_capacity = 1 + + instance_types = ["t3.large"] + k8s_labels = { + Example = "managed_node_groups" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + additional_tags = { + ExtraTag = "example" + } + update_config = { + max_unavailable_percentage = 50 # or set `max_unavailable` + } + } + } fargate_profiles = { default = { @@ -49,7 +84,7 @@ module "eks" { ] # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`) - subnets = [local.vpc.private_subnets[1]] + subnets = [module.vpc.private_subnets[1]] tags = { Owner = "secondary" @@ -60,12 +95,13 @@ module "eks" { manage_aws_auth = false tags = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } } + ############################################## # Calling submodule with existing EKS cluster ############################################## @@ -73,8 +109,8 @@ 
module "eks" { module "fargate_profile_existing_cluster" { source = "../../modules/fargate" - cluster_name = module.barebone_eks.cluster_id - subnets = [local.vpc.private_subnets[0], local.vpc.private_subnets[1]] + cluster_name = module.eks.cluster_id + subnets = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]] fargate_profiles = { profile1 = { @@ -95,7 +131,8 @@ module "fargate_profile_existing_cluster" { ] tags = { - Owner = "profile1" + Owner = "profile1" + submodule = "true" } } @@ -111,22 +148,25 @@ module "fargate_profile_existing_cluster" { ] # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`) - subnets = [local.vpc.private_subnets[1]] + subnets = [module.vpc.private_subnets[0]] tags = { - Owner = "profile2" + Owner = "profile2" + submodule = "true" } } } tags = { - DoYouLoveFargate = "Yes" + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } } -############# -# Kubernetes -############# +################################################################################ +# Kubernetes provider configuration +################################################################################ data "aws_eks_cluster" "cluster" { name = module.eks.cluster_id @@ -142,60 +182,45 @@ provider "kubernetes" { token = data.aws_eks_cluster_auth.cluster.token } -############################################################ -# Barebone EKS Cluster where submodules can add extra stuff -############################################################ - -module "barebone_eks" { - source = "../.." 
- - cluster_name = "barebone-${local.cluster_name}" - cluster_version = "1.21" - - vpc_id = local.vpc.vpc_id - subnets = local.vpc.private_subnets - - tags = { - Environment = "test" - Barebone = "yes_please" - } -} - -############# -# Kubernetes -############# - -data "aws_eks_cluster" "barebone" { - name = module.barebone_eks.cluster_id -} +################################################################################ +# Supporting Resources +################################################################################ -data "aws_eks_cluster_auth" "barebone" { - name = module.barebone_eks.cluster_id +data "aws_availability_zones" "available" { } -provider "kubernetes" { - alias = "barebone" - - host = data.aws_eks_cluster.barebone.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.barebone.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.barebone.token +resource "random_string" "suffix" { + length = 8 + special = false } +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + azs = data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" + } -################################################################################ -# Supporting resources (managed in "_bootstrap" directory) -################################################################################ - -data "terraform_remote_state" "bootstrap" { - backend = "local" + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } - config = { - path = "../_bootstrap/terraform.tfstate" + tags = { + Example = local.name + 
GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } } -locals { - region = data.terraform_remote_state.bootstrap.outputs.region - cluster_name = data.terraform_remote_state.bootstrap.outputs.cluster_name - vpc = data.terraform_remote_state.bootstrap.outputs.vpc -} diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf index 120d873e2f..bbcf893252 100644 --- a/examples/fargate/versions.tf +++ b/examples/fargate/versions.tf @@ -5,6 +5,6 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = ">= 1.11" + kubernetes = "~> 2.0" } } diff --git a/examples/instance_refresh/README.md b/examples/instance_refresh/README.md new file mode 100644 index 0000000000..73b836d689 --- /dev/null +++ b/examples/instance_refresh/README.md @@ -0,0 +1,82 @@ +# Instance refresh example + +This is EKS example using [instance refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for worker groups. + +See [the official documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) for more details. + +## Usage + +To run this example you need to execute: + +```bash +$ terraform init +$ terraform plan +$ terraform apply +``` + +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. 
+ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [aws](#requirement\_aws) | >= 3.22.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | +| [random](#requirement\_random) | >= 2.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 3.22.0 | +| [helm](#provider\_helm) | ~> 2.0 | +| [random](#provider\_random) | >= 2.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | 4.1.0 | +| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0.0 | +| [eks](#module\_eks) | ../.. | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_autoscaling_lifecycle_hook.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource | +| [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| 
[aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. 
| +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | +| [sqs\_queue\_asg\_notification\_arn](#output\_sqs\_queue\_asg\_notification\_arn) | SQS queue ASG notification ARN | +| [sqs\_queue\_asg\_notification\_url](#output\_sqs\_queue\_asg\_notification\_url) | SQS queue ASG notification URL | + diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf index 54735fcddd..f32964b1d7 100644 --- a/examples/instance_refresh/main.tf +++ b/examples/instance_refresh/main.tf @@ -1,24 +1,18 @@ -# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup - provider "aws" { region = local.region } -data "aws_caller_identity" "current" {} - -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id +locals { + name = "instance_refresh-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" } -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} +################################################################################ +# EKS Module +################################################################################ -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.cluster.token -} +# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup provider "helm" { kubernetes { @@ -28,29 +22,7 @@ provider "helm" { } } -data "aws_availability_zones" "available" { -} - -locals { - cluster_name = "test-refresh-${random_string.suffix.result}" - region = "eu-west-1" -} - -resource "random_string" "suffix" { - length = 8 - special = false -} - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0.0" - - name = local.cluster_name 
- cidr = "10.0.0.0/16" - azs = data.aws_availability_zones.available.names - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - enable_dns_hostnames = true -} +data "aws_caller_identity" "current" {} data "aws_iam_policy_document" "aws_node_termination_handler" { statement { @@ -84,10 +56,12 @@ data "aws_iam_policy_document" "aws_node_termination_handler" { } resource "aws_iam_policy" "aws_node_termination_handler" { - name = "${local.cluster_name}-aws-node-termination-handler" + name = "${local.name}-aws-node-termination-handler" policy = data.aws_iam_policy_document.aws_node_termination_handler.json } +data "aws_region" "current" {} + data "aws_iam_policy_document" "aws_node_termination_handler_events" { statement { effect = "Allow" @@ -102,7 +76,7 @@ data "aws_iam_policy_document" "aws_node_termination_handler_events" { "sqs:SendMessage", ] resources = [ - "arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.cluster_name}", + "arn:aws:sqs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:${local.name}", ] } } @@ -110,13 +84,13 @@ data "aws_iam_policy_document" "aws_node_termination_handler_events" { module "aws_node_termination_handler_sqs" { source = "terraform-aws-modules/sqs/aws" version = "~> 3.0.0" - name = local.cluster_name + name = local.name message_retention_seconds = 300 policy = data.aws_iam_policy_document.aws_node_termination_handler_events.json } resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" { - name = "${local.cluster_name}-asg-termination" + name = "${local.name}-asg-termination" description = "Node termination event rule" event_pattern = jsonencode( { @@ -132,13 +106,13 @@ resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" { } resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" { - target_id = "${local.cluster_name}-asg-termination" + target_id = "${local.name}-asg-termination" rule = 
aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name arn = module.aws_node_termination_handler_sqs.sqs_queue_arn } resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" { - name = "${local.cluster_name}-spot-termination" + name = "${local.name}-spot-termination" description = "Node termination event rule" event_pattern = jsonencode( { @@ -154,7 +128,7 @@ resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" { } resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" { - target_id = "${local.cluster_name}-spot-termination" + target_id = "${local.name}-spot-termination" rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name arn = module.aws_node_termination_handler_sqs.sqs_queue_arn } @@ -163,11 +137,11 @@ module "aws_node_termination_handler_role" { source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" version = "4.1.0" create_role = true - role_description = "IRSA role for ANTH, cluster ${local.cluster_name}" - role_name_prefix = local.cluster_name + role_description = "IRSA role for ANTH, cluster ${local.name}" + role_name_prefix = local.name provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "") role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn] - oidc_fully_qualified_subjects = ["system:serviceaccount:${var.namespace}:${var.serviceaccount}"] + oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"] } resource "helm_release" "aws_node_termination_handler" { @@ -176,19 +150,19 @@ resource "helm_release" "aws_node_termination_handler" { ] name = "aws-node-termination-handler" - namespace = var.namespace + namespace = "kube-system" repository = "https://aws.github.io/eks-charts" chart = "aws-node-termination-handler" - version = var.aws_node_termination_handler_chart_version + version = "0.15.0" create_namespace = true set { name = "awsRegion" - value = local.region + value = 
data.aws_region.current.name } set { name = "serviceAccount.name" - value = var.serviceaccount + value = "aws-node-termination-handler" } set { name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" @@ -226,12 +200,18 @@ resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" { } module "eks" { - source = "../.." - cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.public_subnets - vpc_id = module.vpc.vpc_id - enable_irsa = true + source = "../.." + + cluster_name = local.name + cluster_version = local.cluster_version + + vpc_id = module.vpc.vpc_id + subnets = module.vpc.private_subnets + + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true + + enable_irsa = true worker_groups_launch_template = [ { name = "refresh" @@ -257,4 +237,70 @@ module "eks" { ] } ] + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Kubernetes provider configuration +################################################################################ + +data "aws_eks_cluster" "cluster" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.eks.cluster_id +} + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_availability_zones" "available" { +} + +resource "random_string" "suffix" { + length = 8 + special = false +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + azs = 
data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } } diff --git a/examples/instance_refresh/variables.tf b/examples/instance_refresh/variables.tf index 60b24336b3..e69de29bb2 100644 --- a/examples/instance_refresh/variables.tf +++ b/examples/instance_refresh/variables.tf @@ -1,17 +0,0 @@ -variable "aws_node_termination_handler_chart_version" { - description = "Version of the aws-node-termination-handler Helm chart to install." - type = string - default = "0.15.0" -} - -variable "namespace" { - description = "Namespace for the aws-node-termination-handler." - type = string - default = "kube-system" -} - -variable "serviceaccount" { - description = "Serviceaccount for the aws-node-termination-handler." - type = string - default = "aws-node-termination-handler" -} diff --git a/examples/instance_refresh/versions.tf b/examples/instance_refresh/versions.tf index 67281c8d51..f546ca0cf0 100644 --- a/examples/instance_refresh/versions.tf +++ b/examples/instance_refresh/versions.tf @@ -5,7 +5,7 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" - helm = "~> 2.1.2" + kubernetes = "~> 2.0" + helm = "~> 2.0" } } diff --git a/examples/irsa/README.md b/examples/irsa/README.md index ab81f649d7..770ca3f902 100644 --- a/examples/irsa/README.md +++ b/examples/irsa/README.md @@ -2,64 +2,69 @@ This example shows how to create an IAM role to be used for a Kubernetes `ServiceAccount`. 
It will create a policy and role to be used by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) using the [public Helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler). -The AWS documentation for IRSA is here: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html +See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) for more details. -## Setup +## Usage -Run Terraform: +To run this example you need to execute: -``` -terraform init -terraform apply +```bash +$ terraform init +$ terraform plan +$ terraform apply ``` -Set kubectl context to the new cluster: `export KUBECONFIG=kubeconfig_test-eks-irsa` +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. -Check that there is a node that is `Ready`: + +## Requirements -``` -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-10-0-2-190.us-west-2.compute.internal Ready 6m39s v1.14.8-eks-b8860f -``` +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [aws](#requirement\_aws) | >= 3.22.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | +| [random](#requirement\_random) | >= 2.1 | -Replace `` with your AWS account ID in `cluster-autoscaler-chart-values.yaml`. There is output from terraform for this. 
+## Providers -Install the chart using the provided values file: +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 3.22.0 | +| [helm](#provider\_helm) | ~> 2.0 | +| [random](#provider\_random) | >= 2.1 | -``` -$ helm repo add autoscaler https://kubernetes.github.io/autoscaler -$ helm repo update -$ helm install cluster-autoscaler --namespace kube-system autoscaler/cluster-autoscaler --values cluster-autoscaler-chart-values.yaml -``` +## Modules -## Verify +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | ../.. | | +| [iam\_assumable\_role\_admin](#module\_iam\_assumable\_role\_admin) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | -Ensure the cluster-autoscaler pod is running: +## Resources -``` -$ kubectl --namespace=kube-system get pods -l "app.kubernetes.io/name=aws-cluster-autoscaler-chart" -NAME READY STATUS RESTARTS AGE -cluster-autoscaler-aws-cluster-autoscaler-chart-5545d4b97-9ztpm 1/1 Running 0 3m -``` +| Name | Type | +|------|------| +| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| 
[aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | -Observe the `AWS_*` environment variables that were added to the pod automatically by EKS: +## Inputs -``` -kubectl --namespace=kube-system get pods -l "app.kubernetes.io/name=aws-cluster-autoscaler-chart" -o yaml | grep -A3 AWS_ROLE_ARN - -- name: AWS_ROLE_ARN - value: arn:aws:iam::xxxxxxxxx:role/cluster-autoscaler -- name: AWS_WEB_IDENTITY_TOKEN_FILE - value: /var/run/secrets/eks.amazonaws.com/serviceaccount/token -``` +No inputs. -Verify it is working by checking the logs, you should see that it has discovered the autoscaling group successfully: +## Outputs -``` -kubectl --namespace=kube-system logs -l "app.kubernetes.io/name=aws-cluster-autoscaler-chart" - -I0128 14:59:00.901513 1 auto_scaling_groups.go:354] Regenerating instance to ASG map for ASGs: [test-eks-irsa-worker-group-12020012814125354700000000e] -I0128 14:59:00.969875 1 auto_scaling_groups.go:138] Registering ASG test-eks-irsa-worker-group-12020012814125354700000000e -I0128 14:59:00.969906 1 aws_manager.go:263] Refreshed ASG list, next refresh after 2020-01-28 15:00:00.969901767 +0000 UTC m=+61.310501783 -``` +| Name | Description | +|------|-------------| +| [aws\_account\_id](#output\_aws\_account\_id) | IAM AWS account id | + diff --git a/examples/irsa/cluster-autoscaler-chart-values.yaml b/examples/irsa/cluster-autoscaler-chart-values.yaml deleted file mode 100644 index 4e5494de30..0000000000 --- a/examples/irsa/cluster-autoscaler-chart-values.yaml +++ /dev/null @@ -1,14 +0,0 @@ -awsRegion: eu-west-1 - -rbac: - create: true - serviceAccount: - # This value should match 
local.k8s_service_account_name in locals.tf - name: cluster-autoscaler-aws-cluster-autoscaler-chart - annotations: - # This value should match the ARN of the role created by module.iam_assumable_role_admin in irsa.tf - eks.amazonaws.com/role-arn: "arn:aws:iam:::role/cluster-autoscaler" - -autoDiscovery: - clusterName: test-eks-irsa - enabled: true diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf index 7bb9f7f258..a36d0e3394 100644 --- a/examples/irsa/irsa.tf +++ b/examples/irsa/irsa.tf @@ -1,6 +1,62 @@ +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +locals { + k8s_service_account_namespace = "kube-system" + k8s_service_account_name = "cluster-autoscaler-aws" +} + +provider "helm" { + kubernetes { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token + } +} + +resource "helm_release" "cluster-autoscaler" { + depends_on = [ + module.eks + ] + + name = "cluster-autoscaler" + namespace = local.k8s_service_account_namespace + repository = "https://kubernetes.github.io/autoscaler" + chart = "cluster-autoscaler" + version = "9.10.7" + create_namespace = false + + set { + name = "awsRegion" + value = data.aws_region.current.name + } + set { + name = "rbac.serviceAccount.name" + value = local.k8s_service_account_name + } + set { + name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = module.iam_assumable_role_admin.iam_role_arn + type = "string" + } + set { + name = "autoDiscovery.clusterName" + value = local.name + } + set { + name = "autoDiscovery.enabled" + value = "true" + } + set { + name = "rbac.create" + value = "true" + } +} + module "iam_assumable_role_admin" { source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" - version = "3.6.0" + version = "~> 4.0" create_role = true role_name = "cluster-autoscaler" diff --git 
a/examples/irsa/locals.tf b/examples/irsa/locals.tf deleted file mode 100644 index a0e5da0c28..0000000000 --- a/examples/irsa/locals.tf +++ /dev/null @@ -1,5 +0,0 @@ -locals { - cluster_name = "test-eks-irsa" - k8s_service_account_namespace = "kube-system" - k8s_service_account_name = "cluster-autoscaler-aws-cluster-autoscaler-chart" -} diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf index c9ea505900..ebe2eeffea 100644 --- a/examples/irsa/main.tf +++ b/examples/irsa/main.tf @@ -1,53 +1,37 @@ provider "aws" { - region = "eu-west-1" + region = local.region } -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id +locals { + name = "irsa-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" } -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} +################################################################################ +# EKS Module +################################################################################ -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.cluster.token -} +module "eks" { + source = "../.." -data "aws_availability_zones" "available" {} + cluster_name = local.name + cluster_version = local.cluster_version -data "aws_caller_identity" "current" {} + vpc_id = module.vpc.vpc_id + subnets = module.vpc.private_subnets -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "2.64.0" - name = "test-vpc" - cidr = "10.0.0.0/16" - azs = data.aws_availability_zones.available.names - public_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - enable_dns_hostnames = true + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } -} - -module "eks" { - source = "../.." 
- cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.public_subnets - vpc_id = module.vpc.vpc_id - enable_irsa = true + enable_irsa = true worker_groups = [ { name = "worker-group-1" instance_type = "t3.medium" asg_desired_capacity = 1 + asg_max_size = 4 tags = [ { "key" = "k8s.io/cluster-autoscaler/enabled" @@ -55,11 +39,77 @@ module "eks" { "value" = "true" }, { - "key" = "k8s.io/cluster-autoscaler/${local.cluster_name}" + "key" = "k8s.io/cluster-autoscaler/${local.name}" "propagate_at_launch" = "false" "value" = "owned" } ] } ] + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Kubernetes provider configuration +################################################################################ + +data "aws_eks_cluster" "cluster" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.eks.cluster_id } + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_availability_zones" "available" { +} + +resource "random_string" "suffix" { + length = 8 + special = false +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + azs = data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + 
"kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + diff --git a/examples/irsa/versions.tf b/examples/irsa/versions.tf index 6e29ae8f1b..f546ca0cf0 100644 --- a/examples/irsa/versions.tf +++ b/examples/irsa/versions.tf @@ -5,6 +5,7 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = "~> 2.0" + helm = "~> 2.0" } } diff --git a/examples/_bootstrap/README.md b/examples/launch_templates/README.md similarity index 53% rename from examples/_bootstrap/README.md rename to examples/launch_templates/README.md index 28e34f9c8f..6d1d5e2549 100644 --- a/examples/_bootstrap/README.md +++ b/examples/launch_templates/README.md @@ -1,8 +1,8 @@ -# Various bootstrap resources required for other EKS examples +# Launch templates example -Configuration in this directory creates some resources required in other EKS examples (such as VPC). +This is EKS example using workers launch template with worker groups feature. -The resources created here are free (no NAT gateways here) and they can reside in test AWS account. +See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) for more details. ## Usage @@ -23,7 +23,8 @@ Note that this example may create resources which cost money. Run `terraform des |------|---------| | [terraform](#requirement\_terraform) | >= 0.13.1 | | [aws](#requirement\_aws) | >= 3.22.0 | -| [kubernetes](#requirement\_kubernetes) | >= 1.11 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | | [random](#requirement\_random) | >= 2.1 | ## Providers @@ -37,6 +38,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Source | Version | |------|--------|---------| +| [eks](#module\_eks) | ../.. | | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | ## Resources @@ -45,6 +47,8 @@ Note that this example may create resources which cost money. Run `terraform des |------|------| | [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | ## Inputs @@ -54,7 +58,8 @@ No inputs. | Name | Description | |------|-------------| -| [cluster\_name](#output\_cluster\_name) | Name of EKS Cluster used in tags for subnets | -| [region](#output\_region) | AWS region | -| [vpc](#output\_vpc) | Complete output of VPC module | +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. 
| diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf index 68fc20599b..476ca13d68 100644 --- a/examples/launch_templates/main.tf +++ b/examples/launch_templates/main.tf @@ -1,50 +1,25 @@ provider "aws" { - region = "eu-west-1" -} - -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} - -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.cluster.token -} - -data "aws_availability_zones" "available" { + region = local.region } locals { - cluster_name = "test-eks-lt-${random_string.suffix.result}" -} - -resource "random_string" "suffix" { - length = 8 - special = false + name = "launch_template-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" } -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" - - name = "test-vpc-lt" - cidr = "10.0.0.0/16" - azs = data.aws_availability_zones.available.names - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - enable_dns_hostnames = true -} +################################################################################ +# EKS Module +################################################################################ module "eks" { - source = "../.." - cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.public_subnets - vpc_id = module.vpc.vpc_id + source = "../.." 
+ cluster_name = local.name + cluster_version = local.cluster_version + vpc_id = module.vpc.vpc_id + subnets = module.vpc.private_subnets + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true worker_groups_launch_template = [ { @@ -90,4 +65,70 @@ module "eks" { ] }, ] + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Kubernetes provider configuration +################################################################################ + +data "aws_eks_cluster" "cluster" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.eks.cluster_id +} + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_availability_zones" "available" { +} + +resource "random_string" "suffix" { + length = 8 + special = false +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + azs = data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } + + tags = { + Example = local.name + GithubRepo = 
"terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } } diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf index 359db3a481..b778ec7926 100644 --- a/examples/launch_templates/outputs.tf +++ b/examples/launch_templates/outputs.tf @@ -1,19 +1,19 @@ -output "cluster_endpoint" { - description = "Endpoint for EKS control plane." - value = module.eks.cluster_endpoint -} - -output "cluster_security_group_id" { - description = "Security group ids attached to the cluster control plane." - value = module.eks.cluster_security_group_id -} - -output "kubectl_config" { - description = "kubectl config as generated by the module." - value = module.eks.kubeconfig -} - -output "config_map_aws_auth" { - description = "A kubernetes configuration to authenticate to this EKS cluster." - value = module.eks.config_map_aws_auth -} +output "cluster_endpoint" { + description = "Endpoint for EKS control plane." + value = module.eks.cluster_endpoint +} + +output "cluster_security_group_id" { + description = "Security group ids attached to the cluster control plane." + value = module.eks.cluster_security_group_id +} + +output "kubectl_config" { + description = "kubectl config as generated by the module." + value = module.eks.kubeconfig +} + +output "config_map_aws_auth" { + description = "A kubernetes configuration to authenticate to this EKS cluster." 
+   value = module.eks.config_map_aws_auth +} diff --git a/examples/launch_templates/pre_userdata.sh b/examples/launch_templates/pre_userdata.sh index 52dd50f28c..4cbf0d114b 100644 --- a/examples/launch_templates/pre_userdata.sh +++ b/examples/launch_templates/pre_userdata.sh @@ -1 +1 @@ -yum update -y +yum update -y diff --git a/examples/launch_templates/versions.tf b/examples/launch_templates/versions.tf index 6e29ae8f1b..9c1dbfa3e8 100644 --- a/examples/launch_templates/versions.tf +++ b/examples/launch_templates/versions.tf @@ -5,6 +5,7 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = "~> 2.0" } } + diff --git a/examples/launch_templates_with_managed_node_groups/README.md b/examples/launch_templates_with_managed_node_groups/README.md new file mode 100644 index 0000000000..e79ff558f9 --- /dev/null +++ b/examples/launch_templates_with_managed_node_groups/README.md @@ -0,0 +1,70 @@ +# Launch template with managed groups example + +This is an EKS example using a custom workers launch template with the managed groups feature in two different ways: + +- Using a defined existing launch template created outside the module +- Using a launch template which will be created by the module with user customization + +See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) for more details. + +## Usage + +To run this example you need to execute: + +```bash +$ terraform init +$ terraform plan +$ terraform apply +``` + +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. 
+ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [aws](#requirement\_aws) | >= 3.22.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | +| [random](#requirement\_random) | >= 2.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 3.22.0 | +| [random](#provider\_random) | >= 2.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | ../.. | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_service_linked_role.autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource | +| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. 
| +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | + diff --git a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf b/examples/launch_templates_with_managed_node_groups/launchtemplate.tf index a2840ebc77..0f0e4ebf31 100644 --- a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf +++ b/examples/launch_templates_with_managed_node_groups/launchtemplate.tf @@ -2,7 +2,7 @@ # template = file("${path.module}/templates/userdata.sh.tpl") # # vars = { -# cluster_name = local.cluster_name +# cluster_name = local.name # endpoint = module.eks.cluster_endpoint # cluster_auth_base64 = module.eks.cluster_certificate_authority_data # @@ -17,6 +17,7 @@ # # Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI, # then the default user-data for bootstrapping a cluster is merged in the copy. + resource "aws_launch_template" "default" { name_prefix = "eks-example-" description = "Default Launch-Template" @@ -59,22 +60,21 @@ resource "aws_launch_template" "default" { # data.template_file.launch_template_userdata.rendered, # ) - # Supplying custom tags to EKS instances is another use-case for LaunchTemplates tag_specifications { resource_type = "instance" tags = { - CustomTag = "EKS example" + CustomTag = "Instance custom tag" } } - # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho) + # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. 
(doesnt add tags to dynamically provisioned volumes via PVC) tag_specifications { resource_type = "volume" tags = { - CustomTag = "EKS example" + CustomTag = "Volume custom tag" } } @@ -89,7 +89,7 @@ resource "aws_launch_template" "default" { # Tag the LT itself tags = { - CustomTag = "EKS example" + CustomTag = "Launch template custom tag" } lifecycle { diff --git a/examples/launch_templates_with_managed_node_groups/main.tf b/examples/launch_templates_with_managed_node_groups/main.tf index 833b4b9f29..5d70e97615 100644 --- a/examples/launch_templates_with_managed_node_groups/main.tf +++ b/examples/launch_templates_with_managed_node_groups/main.tf @@ -1,7 +1,91 @@ provider "aws" { - region = "eu-west-1" + region = local.region } +locals { + name = "lt_with_mng-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." 
+ + cluster_name = local.name + cluster_version = local.cluster_version + + vpc_id = module.vpc.vpc_id + subnets = module.vpc.private_subnets + + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true + + node_groups = { + # use already defined launch template + example1 = { + name_prefix = "example1" + desired_capacity = 1 + max_capacity = 15 + min_capacity = 1 + + launch_template_id = aws_launch_template.default.id + launch_template_version = aws_launch_template.default.default_version + + instance_types = ["t3.small"] + + additional_tags = { + ExtraTag = "example1" + } + } + # create launch template + example2 = { + create_launch_template = true + desired_capacity = 1 + max_capacity = 10 + min_capacity = 1 + + disk_size = 50 + disk_type = "gp3" + disk_throughput = 150 + disk_iops = 3000 + + instance_types = ["t3.large"] + capacity_type = "SPOT" + k8s_labels = { + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + additional_tags = { + ExtraTag = "example2" + } + taints = [ + { + key = "dedicated" + value = "gpuGroup" + effect = "NO_SCHEDULE" + } + ] + update_config = { + max_unavailable_percentage = 50 # or set `max_unavailable` + } + } + } + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Kubernetes provider configuration +################################################################################ + data "aws_eks_cluster" "cluster" { name = module.eks.cluster_id } @@ -16,11 +100,11 @@ provider "kubernetes" { token = data.aws_eks_cluster_auth.cluster.token } -data "aws_availability_zones" "available" { -} +################################################################################ +# Supporting Resources +################################################################################ -locals { - cluster_name = 
"test-eks-lt-${random_string.suffix.result}" +data "aws_availability_zones" "available" { } resource "random_string" "suffix" { @@ -30,43 +114,30 @@ resource "random_string" "suffix" { module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" + version = "~> 3.0" - name = "test-vpc" - cidr = "172.16.0.0/16" + name = local.name + cidr = "10.0.0.0/16" azs = data.aws_availability_zones.available.names - private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"] - public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"] + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] enable_nat_gateway = true single_nat_gateway = true enable_dns_hostnames = true - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" # EKS adds this and TF would want to remove then later + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" } -} - -module "eks" { - source = "../.." 
- cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.private_subnets - vpc_id = module.vpc.vpc_id - - node_groups = { - example = { - desired_capacity = 1 - max_capacity = 15 - min_capacity = 1 - - launch_template_id = aws_launch_template.default.id - launch_template_version = aws_launch_template.default.default_version - instance_types = var.instance_types + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } - additional_tags = { - CustomTag = "EKS example" - } - } + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } } diff --git a/examples/launch_templates_with_managed_node_groups/variables.tf b/examples/launch_templates_with_managed_node_groups/variables.tf index 9bd936c6f8..e69de29bb2 100644 --- a/examples/launch_templates_with_managed_node_groups/variables.tf +++ b/examples/launch_templates_with_managed_node_groups/variables.tf @@ -1,6 +0,0 @@ -variable "instance_types" { - description = "Instance types" - # Smallest recommended, where ~1.1Gb of 2Gb memory is available for the Kubernetes pods after ‘warming up’ Docker, Kubelet, and OS - type = list(string) - default = ["t3.small"] -} diff --git a/examples/launch_templates_with_managed_node_groups/versions.tf b/examples/launch_templates_with_managed_node_groups/versions.tf index 6e29ae8f1b..bbcf893252 100644 --- a/examples/launch_templates_with_managed_node_groups/versions.tf +++ b/examples/launch_templates_with_managed_node_groups/versions.tf @@ -5,6 +5,6 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = "~> 2.0" } } diff --git a/examples/managed_node_groups/README.md b/examples/managed_node_groups/README.md new file mode 100644 index 0000000000..8cc2ecd8ae --- /dev/null +++ b/examples/managed_node_groups/README.md @@ -0,0 +1,73 @@ +# Managed groups example + +This is EKS example using managed 
groups feature in two different ways: + +- Using SPOT instances in node group +- Using ON_DEMAND instance in node group + +See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details. + +## Usage + +To run this example you need to execute: + +```bash +$ terraform init +$ terraform plan +$ terraform apply +``` + +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [aws](#requirement\_aws) | >= 3.22.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | +| [random](#requirement\_random) | >= 2.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 3.22.0 | +| [random](#provider\_random) | >= 2.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | ../.. | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | + +## Resources + +| Name | Type | +|------|------| +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` |
[
"777777777777",
"888888888888"
]
| no | +| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
|
[
{
"groups": [
"system:masters"
],
"rolearn": "arn:aws:iam::66666666666:role/role1",
"username": "role1"
}
]
| no | +| [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. |
list(object({
userarn = string
username = string
groups = list(string)
}))
|
[
{
"groups": [
"system:masters"
],
"userarn": "arn:aws:iam::66666666666:user/user1",
"username": "user1"
},
{
"groups": [
"system:masters"
],
"userarn": "arn:aws:iam::66666666666:user/user2",
"username": "user2"
}
]
| no | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | +| [node\_groups](#output\_node\_groups) | Outputs from node groups | + diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf index 99cc1fd730..56a2b05346 100644 --- a/examples/managed_node_groups/main.tf +++ b/examples/managed_node_groups/main.tf @@ -1,70 +1,28 @@ provider "aws" { - region = "eu-west-1" -} - -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} - -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) - token = data.aws_eks_cluster_auth.cluster.token -} - -data "aws_availability_zones" "available" { + region = local.region } locals { - cluster_name = "test-eks-${random_string.suffix.result}" -} - -resource "random_string" "suffix" { - length = 8 - special = false + name = "managed_node_groups-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" } -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" - - name = "test-vpc" - cidr = "172.16.0.0/16" - azs = data.aws_availability_zones.available.names - private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"] - public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"] - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { 
- "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - } -} +################################################################################ +# EKS Module +################################################################################ module "eks" { - source = "../.." - cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.private_subnets + source = "../.." - tags = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } + cluster_name = local.name + cluster_version = local.cluster_version + + vpc_id = module.vpc.vpc_id + subnets = module.vpc.private_subnets - vpc_id = module.vpc.vpc_id + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true node_groups_defaults = { ami_type = "AL2_x86_64" @@ -73,23 +31,16 @@ module "eks" { node_groups = { example = { - create_launch_template = true - desired_capacity = 1 max_capacity = 10 min_capacity = 1 - disk_size = 50 - disk_type = "gp3" - disk_throughput = 150 - disk_iops = 3000 - instance_types = ["t3.large"] capacity_type = "SPOT" k8s_labels = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" + Example = "managed_node_groups" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } additional_tags = { ExtraTag = "example" @@ -105,24 +56,93 @@ module "eks" { max_unavailable_percentage = 50 # or set `max_unavailable` } } - } - - # Create security group rules to allow communication between pods on workers and pods in managed node groups. - # Set this to true if you have AWS-Managed node groups and Self-Managed worker groups. 
- # See https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1089 - - # worker_create_cluster_primary_security_group_rules = true + example2 = { + desired_capacity = 1 + max_capacity = 10 + min_capacity = 1 - # worker_groups_launch_template = [ - # { - # name = "worker-group-1" - # instance_type = "t3.small" - # asg_desired_capacity = 2 - # public_ip = true - # } - # ] + instance_types = ["t3.medium"] + k8s_labels = { + Example = "managed_node_groups" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + additional_tags = { + ExtraTag = "example2" + } + update_config = { + max_unavailable_percentage = 50 # or set `max_unavailable` + } + } + } map_roles = var.map_roles map_users = var.map_users map_accounts = var.map_accounts + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Kubernetes provider configuration +################################################################################ + +data "aws_eks_cluster" "cluster" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.eks.cluster_id +} + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token +} + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_availability_zones" "available" { +} + +resource "random_string" "suffix" { + length = 8 + special = false +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + azs = data.aws_availability_zones.available.names + private_subnets = ["10.0.1.0/24", 
"10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } } diff --git a/examples/managed_node_groups/versions.tf b/examples/managed_node_groups/versions.tf index 47f7c05e43..bbcf893252 100644 --- a/examples/managed_node_groups/versions.tf +++ b/examples/managed_node_groups/versions.tf @@ -2,9 +2,9 @@ terraform { required_version = ">= 0.13.1" required_providers { - aws = ">= 3.56.0" + aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = "~> 2.0" } } diff --git a/examples/secrets_encryption/README.md b/examples/secrets_encryption/README.md new file mode 100644 index 0000000000..c3ee731f2a --- /dev/null +++ b/examples/secrets_encryption/README.md @@ -0,0 +1,66 @@ +# Secrets encryption example + +This is an EKS example using the [secrets encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) feature. + +See [the official blog](https://aws.amazon.com/blogs/containers/using-eks-encryption-provider-support-for-defense-in-depth/) for more details. + +## Usage + +To run this example you need to execute: + +```bash +$ terraform init +$ terraform plan +$ terraform apply +``` + +Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. 
+ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [aws](#requirement\_aws) | >= 3.22.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [local](#requirement\_local) | >= 1.4 | +| [random](#requirement\_random) | >= 2.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 3.22.0 | +| [random](#provider\_random) | >= 2.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | ../.. | | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. 
| + diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf index 76fa2b2392..49d9a7b029 100644 --- a/examples/secrets_encryption/main.tf +++ b/examples/secrets_encryption/main.tf @@ -1,7 +1,57 @@ provider "aws" { - region = "eu-west-1" + region = local.region } +locals { + name = "secrets_encryption-${random_string.suffix.result}" + cluster_version = "1.20" + region = "eu-west-1" +} + +################################################################################ +# EKS Module +################################################################################ + +module "eks" { + source = "../.." + + cluster_name = local.name + cluster_version = local.cluster_version + + vpc_id = module.vpc.vpc_id + subnets = module.vpc.private_subnets + + cluster_endpoint_private_access = true + cluster_endpoint_public_access = true + + + cluster_encryption_config = [ + { + provider_key_arn = aws_kms_key.eks.arn + resources = ["secrets"] + } + ] + + worker_groups = [ + { + name = "worker-group-1" + instance_type = "t3.small" + additional_userdata = "echo foo bar" + asg_desired_capacity = 2 + }, + ] + + tags = { + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +################################################################################ +# Kubernetes provider configuration +################################################################################ + data "aws_eks_cluster" "cluster" { name = module.eks.cluster_id } @@ -16,11 +66,28 @@ provider "kubernetes" { token = data.aws_eks_cluster_auth.cluster.token } -data "aws_availability_zones" "available" { +################################################################################ +# KMS for encrypting secrets +################################################################################ + +resource "aws_kms_key" "eks" { + description = "EKS Secret Encryption Key" + deletion_window_in_days = 7 + enable_key_rotation = true + + tags = 
{ + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } } -locals { - cluster_name = "test-eks-${random_string.suffix.result}" + +################################################################################ +# Supporting Resources +################################################################################ + +data "aws_availability_zones" "available" { } resource "random_string" "suffix" { @@ -28,15 +95,11 @@ resource "random_string" "suffix" { special = false } -resource "aws_kms_key" "eks" { - description = "EKS Secret Encryption Key" -} - module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" + version = "~> 3.0" - name = "test-vpc" + name = local.name cidr = "10.0.0.0/16" azs = data.aws_availability_zones.available.names private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] @@ -46,47 +109,18 @@ module "vpc" { enable_dns_hostnames = true public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = "1" } private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" } -} - -module "eks" { - source = "../.." 
- cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.private_subnets - - cluster_encryption_config = [ - { - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] - } - ] tags = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" + Example = local.name + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" } - - vpc_id = module.vpc.vpc_id - - worker_groups = [ - { - name = "worker-group-1" - instance_type = "t3.small" - additional_userdata = "echo foo bar" - asg_desired_capacity = 2 - }, - ] - - map_roles = var.map_roles - map_users = var.map_users - map_accounts = var.map_accounts } diff --git a/examples/secrets_encryption/variables.tf b/examples/secrets_encryption/variables.tf index 57853d8b4d..e69de29bb2 100644 --- a/examples/secrets_encryption/variables.tf +++ b/examples/secrets_encryption/variables.tf @@ -1,48 +0,0 @@ -variable "map_accounts" { - description = "Additional AWS account numbers to add to the aws-auth configmap." - type = list(string) - - default = [ - "777777777777", - "888888888888", - ] -} - -variable "map_roles" { - description = "Additional IAM roles to add to the aws-auth configmap." - type = list(object({ - rolearn = string - username = string - groups = list(string) - })) - - default = [ - { - rolearn = "arn:aws:iam::66666666666:role/role1" - username = "role1" - groups = ["system:masters"] - }, - ] -} - -variable "map_users" { - description = "Additional IAM users to add to the aws-auth configmap." 
- type = list(object({ - userarn = string - username = string - groups = list(string) - })) - - default = [ - { - userarn = "arn:aws:iam::66666666666:user/user1" - username = "user1" - groups = ["system:masters"] - }, - { - userarn = "arn:aws:iam::66666666666:user/user2" - username = "user2" - groups = ["system:masters"] - }, - ] -} diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf index 120d873e2f..bbcf893252 100644 --- a/examples/secrets_encryption/versions.tf +++ b/examples/secrets_encryption/versions.tf @@ -5,6 +5,6 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = ">= 1.11" + kubernetes = "~> 2.0" } } diff --git a/variables.tf b/variables.tf index a5d9adefe3..a830e20a62 100644 --- a/variables.tf +++ b/variables.tf @@ -71,13 +71,13 @@ variable "aws_auth_additional_labels" { } variable "map_accounts" { - description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format." + description = "Additional AWS account numbers to add to the aws-auth configmap." type = list(string) default = [] } variable "map_roles" { - description = "Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format." + description = "Additional IAM roles to add to the aws-auth configmap." type = list(object({ rolearn = string username = string @@ -87,7 +87,7 @@ variable "map_roles" { } variable "map_users" { - description = "Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format." + description = "Additional IAM users to add to the aws-auth configmap." type = list(object({ userarn = string username = string